pydaptivefiltering

 1# pydaptivefiltering/__init__.py
 2
 3from .base import AdaptiveFilter
 4from .lms import *
 5from .rls import *
 6from .set_membership import *
 7from .lattice import *
 8from .fast_rls import *
 9from .qr_decomposition import *
10from .iir import *
11from .nonlinear import *
12from .subband import *
13from .blind import *
14from .kalman import *
15__version__ = "0.9"
16__author__ = "BruninLima"
17
18__all__ = ["AdaptiveFilter",
19    "LMS", "NLMS", "AffineProjection", "SignData", "SignError", "DualSign", 
20    "LMSNewton", "Power2ErrorLMS", "TDomainLMS", "TDomainDCT", "TDomainDFT",
21    "RLS", "RLSAlt",
22    "SMNLMS", "SMBNLMS", "SMAffineProjection", "SimplifiedSMPUAP", "SimplifiedSMAP",
23    "LRLSPosteriori", "LRLSErrorFeedback", "LRLSPriori", "NormalizedLRLS",
24    "FastRLS", "StabFastRLS",
25    "QRRLS",
26    "ErrorEquation", "GaussNewton", "GaussNewtonGradient", "RLSIIR", "SteiglitzMcBride",
27    "BilinearRLS", "ComplexRBF", "MultilayerPerceptron", "RBF", "VolterraLMS", "VolterraRLS",
28    "CFDLMS", "DLCLLMS", "OLSBLMS",
29    "AffineProjectionCM", "CMA", "Godard", "Sato",
30    "Kalman",
31    "info"]
32
33
def info():
    """Print an overview of the algorithm coverage of the library.

    Lists, chapter by chapter of Diniz's *Adaptive Filtering*, the algorithm
    families implemented in the package, followed by a short usage hint.
    """
    print("\n" + "="*70)
    print("      PyDaptive Filtering - Complete Library Overview")
    print("      Reference: 'Adaptive Filtering' by Paulo S. R. Diniz")
    print("="*70)
    # Chapter -> implemented algorithms (these strings are user-facing output).
    sections = {
        "Cap 3/4 (LMS)": "LMS, NLMS, Affine Projection, Sign Algorithms, Transform Domain",
        "Cap 5 (RLS)": "Standard RLS, Alternative RLS",
        "Cap 6 (Set-Membership)": "SM-NLMS, BNLMS, SM-AP, Simplified AP/PUAP",
        "Cap 7 (Lattice RLS)": "LRLS (Posteriori, Priori, Error Feedback), NLRLS",
        "Cap 8 (Fast RLS)": "Fast Transversal RLS, Stabilized FTRLS",
        "Cap 9 (QR)": "QR-Decomposition Based RLS",
        # Fixed typo: "Steinglitz" -> "Steiglitz" (Steiglitz-McBride method,
        # consistent with the exported class name SteiglitzMcBride).
        "Cap 10 (IIR)": "Error Equation, Gauss-Newton, Steiglitz-McBride, RLS-IIR",
        "Cap 11 (Nonlinear)": "Volterra (LMS/RLS), MLP, RBF, Bilinear RLS",
        "Cap 12 (Subband)": "CFDLMS, DLCLLMS, OLSBLMS",
        "Cap 13 (Blind)": "CMA, Godard, Sato, Blind Affine Projection",
        "Cap 17 (Kalman)": "Kalman Filter",
    }
    for cap, algs in sections.items():
        print(f"\n{cap:25}: {algs}")

    print("\n" + "-"*70)
    print("Usage example: from pydaptivefiltering import LMS")
    print("Documentation: help(pydaptivefiltering.LMS)")
    print("="*70 + "\n")
class AdaptiveFilter(ABC):
    """Common base for every adaptive filter in the package.

    Parameters
    ----------
    filter_order:
        Order in the FIR sense (number of taps - 1). For non-FIR structures
        it acts as a generic size hint for base allocation.
    w_init:
        Initial coefficient vector; zeros are used when omitted.

    Notes
    -----
    - Subclasses that handle complex-valued data must set
      ``supports_complex = True``.
    - Subclasses should call ``_record_history()`` once per iteration (or use
      the helper methods) when a coefficient trajectory is wanted.
    """

    supports_complex: bool = False

    def __init__(self, filter_order: int, w_init: Optional[ArrayLike] = None) -> None:
        self.filter_order: int = int(filter_order)
        # Working dtype is fixed by the subclass' complex-support flag.
        self._dtype = complex if self.supports_complex else float
        n_taps = self.filter_order + 1

        # Delay line holding the most recent input samples.
        self.regressor: np.ndarray = np.zeros(n_taps, dtype=self._dtype)

        if w_init is None:
            self.w: np.ndarray = np.zeros(n_taps, dtype=self._dtype)
        else:
            self.w = np.asarray(w_init, dtype=self._dtype)

        self.w_history: List[np.ndarray] = []
        self._record_history()

    def _record_history(self) -> None:
        """Append a copy of the current coefficient vector to the history."""
        self.w_history.append(np.asarray(self.w).copy())

    def _final_coeffs(self, coefficients: Any) -> Any:
        """Extract the last coefficient set from a history container (list or 2D array)."""
        if coefficients is None:
            return None
        if isinstance(coefficients, list):
            return coefficients[-1] if coefficients else coefficients
        try:
            stacked = np.asarray(coefficients)
        except Exception:
            return coefficients
        return stacked[-1, :] if stacked.ndim == 2 else coefficients

    def _pack_results(
        self,
        outputs: np.ndarray,
        errors: np.ndarray,
        runtime_s: float,
        error_type: str = "a_priori",
        extra: Optional[Dict[str, Any]] = None,
    ) -> OptimizationResult:
        """Bundle run artifacts into a standardized ``OptimizationResult``."""
        return OptimizationResult(
            outputs=np.asarray(outputs),
            errors=np.asarray(errors),
            coefficients=np.asarray(self.w_history),
            algorithm=type(self).__name__,
            runtime_ms=1000.0 * float(runtime_s),
            error_type=str(error_type),
            extra=extra,
        )

    def filter_signal(self, input_signal: ArrayLike) -> np.ndarray:
        """Filter a signal with the current (frozen) coefficients.

        Assumes an FIR structure: the regressor at step ``k`` is
        ``x_k = [x[k], x[k-1], ..., x[k-m]]`` and the output is
        ``y[k] = w^H x_k`` (Hermitian inner product for complex taps).
        """
        signal = np.asarray(input_signal, dtype=self._dtype)
        n = signal.size
        taps = self.filter_order + 1

        # Zero prefix supplies the x[k-i] samples for k < i.
        padded = np.zeros(n + self.filter_order, dtype=self._dtype)
        padded[self.filter_order:] = signal

        out = np.zeros(n, dtype=self._dtype)
        w_conj = self.w.conj()
        for idx in range(n):
            window = padded[idx : idx + taps][::-1]
            out[idx] = np.dot(w_conj, window)

        return out

    @classmethod
    def default_test_init_kwargs(cls, order: int) -> dict:
        """Hook for standardized tests; subclasses may supply init kwargs."""
        return {}

    @abstractmethod
    def optimize(
        self,
        input_signal: ArrayLike,
        desired_signal: ArrayLike,
        **kwargs: Any,
    ) -> Any:
        """Run the adaptation procedure.

        Implementations should return an ``OptimizationResult`` (recommended),
        or a dict with the standardized keys when migrating older code.
        """
        raise NotImplementedError

    def reset_filter(self, w_new: Optional[ArrayLike] = None) -> None:
        """Restore coefficients (to ``w_new`` or zeros) and clear the history."""
        if w_new is None:
            self.w = np.zeros(self.filter_order + 1, dtype=self._dtype)
        else:
            self.w = np.asarray(w_new, dtype=self._dtype)
        self.w_history = []
        self._record_history()

Abstract base class for all adaptive filters.

Parameters

filter_order: Order in the FIR sense (number of taps - 1). For non-FIR structures, it can be used as a generic size indicator for base allocation.
w_init: Initial coefficient vector. If None, initialized to zeros.

Notes

  • Subclasses should set supports_complex = True if they support complex-valued data.
  • Subclasses are expected to call _record_history() every iteration (or use helper methods) if they want coefficient trajectories.
supports_complex: bool = False
filter_order: int
regressor: numpy.ndarray
w_history: List[numpy.ndarray]
def filter_signal( self, input_signal: Union[numpy.ndarray, Sequence[float], Sequence[complex]]) -> numpy.ndarray:
205    def filter_signal(self, input_signal: ArrayLike) -> np.ndarray:
206        """Filter an input signal using current coefficients.
207
208        Default implementation assumes an FIR structure with taps `self.w` and
209        regressor convention:
210            x_k = [x[k], x[k-1], ..., x[k-m]]
211        and output:
212            y[k] = w^H x_k   (Hermitian for complex)
213        """
214        x = np.asarray(input_signal, dtype=self._dtype)
215        n_samples = x.size
216        y = np.zeros(n_samples, dtype=self._dtype)
217
218        x_padded = np.zeros(n_samples + self.filter_order, dtype=self._dtype)
219        x_padded[self.filter_order:] = x
220
221        for k in range(n_samples):
222            x_k = x_padded[k : k + self.filter_order + 1][::-1]
223            y[k] = np.dot(self.w.conj(), x_k)
224
225        return y

Filter an input signal using current coefficients.

Default implementation assumes an FIR structure with taps self.w and regressor convention: x_k = [x[k], x[k-1], ..., x[k-m]] and output: y[k] = w^H x_k (Hermitian for complex)

@classmethod
def default_test_init_kwargs(cls, order: int) -> dict:
227    @classmethod
228    def default_test_init_kwargs(cls, order: int) -> dict:
229        """Override in subclasses to provide init kwargs for standardized tests."""
230        return {}

Override in subclasses to provide init kwargs for standardized tests.

@abstractmethod
def optimize( self, input_signal: Union[numpy.ndarray, Sequence[float], Sequence[complex]], desired_signal: Union[numpy.ndarray, Sequence[float], Sequence[complex]], **kwargs: Any) -> Any:
232    @abstractmethod
233    def optimize(
234        self,
235        input_signal: ArrayLike,
236        desired_signal: ArrayLike,
237        **kwargs: Any,
238    ) -> Any:
239        """Run the adaptation procedure.
240
241        Subclasses should return either:
242        - OptimizationResult (recommended), or
243        - dict-like with standardized keys, if you are migrating older code.
244        """
245        raise NotImplementedError

Run the adaptation procedure.

Subclasses should return either:

  • OptimizationResult (recommended), or
  • dict-like with standardized keys, if you are migrating older code.
def reset_filter( self, w_new: Union[numpy.ndarray, Sequence[float], Sequence[complex], NoneType] = None) -> None:
247    def reset_filter(self, w_new: Optional[ArrayLike] = None) -> None:
248        """Reset coefficients and history."""
249        if w_new is not None:
250            self.w = np.asarray(w_new, dtype=self._dtype)
251        else:
252            self.w = np.zeros(self.filter_order + 1, dtype=self._dtype)
253        self.w_history = []
254        self._record_history()

Reset coefficients and history.

class LMS(AdaptiveFilter):
    """
    Complex Least-Mean Squares (LMS) adaptive FIR filter (Diniz, Alg. 3.2).

    A stochastic-gradient method driven by the instantaneous a priori error.

    Parameters
    ----------
    filter_order : int
        FIR order ``M``; the filter carries ``M + 1`` coefficients.
    step_size : float, optional
        Adaptation step ``mu``. Default is 1e-2.
    w_init : array_like of complex, optional
        Initial coefficients ``w(0)`` with shape ``(M + 1,)``; zeros when
        omitted.

    Notes
    -----
    With the regressor (newest sample first)

    .. math::
        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1},

    the a priori output and error are

    .. math::
        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k],

    and the coefficient update reads

    .. math::
        w[k+1] = w[k] + \\mu\\, e^*[k] \\, x_k.

    This implementation uses complex arithmetic (``supports_complex=True``),
    returns the a priori error ``e[k]``, and records the coefficient history
    through the base class.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 3.2.
    """

    supports_complex: bool = True

    step_size: float

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
    ) -> OptimizationResult:
        """
        Run the LMS adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of complex, shape ``(N,)`` — scalar outputs
              ``y[k] = w^H[k] x_k``.
            - errors : ndarray of complex, shape ``(N,)`` — a priori errors
              ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of complex — history recorded by the
              base class.
            - error_type : str — set to ``"a_priori"``.
        """
        start = perf_counter()

        x_seq = np.asarray(input_signal, dtype=complex).ravel()
        d_seq = np.asarray(desired_signal, dtype=complex).ravel()

        n = int(x_seq.size)
        order = int(self.filter_order)

        y_out = np.zeros(n, dtype=complex)
        e_out = np.zeros(n, dtype=complex)

        # Zero-prefixed delay buffer so early regressors are well-defined.
        buf = np.zeros(n + order, dtype=complex)
        buf[order:] = x_seq

        mu = self.step_size
        for i in range(n):
            reg = buf[i : i + order + 1][::-1]

            y_i = complex(np.vdot(self.w, reg))
            e_i = d_seq[i] - y_i
            y_out[i] = y_i
            e_out[i] = e_i

            # w(k+1) = w(k) + mu e*(k) x(k)
            self.w = self.w + mu * np.conj(e_i) * reg
            self._record_history()

        runtime_s = perf_counter() - start
        if verbose:
            print(f"[LMS] Completed in {runtime_s * 1000:.03f} ms")

        return self._pack_results(
            outputs=y_out,
            errors=e_out,
            runtime_s=runtime_s,
            error_type="a_priori",
        )

Complex Least-Mean Squares (LMS) adaptive filter.

Standard complex LMS algorithm for adaptive FIR filtering, following Diniz (Alg. 3.2). The method performs a stochastic-gradient update using the instantaneous a priori error.

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. step_size : float, optional Adaptation step size mu. Default is 1e-2. w_init : array_like of complex, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

At iteration k, form the regressor vector (newest sample first):

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$

The a priori output and error are

$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k],$$

and the LMS update is

$$w[k+1] = w[k] + \mu\, e^*[k] \, x_k.$$

This implementation: - uses complex arithmetic (supports_complex=True), - returns the a priori error e[k], - records coefficient history via the base class.

References

  • P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 3.2.

LMS( filter_order: int, step_size: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None)
78    def __init__(
79        self,
80        filter_order: int,
81        step_size: float = 1e-2,
82        w_init: Optional[ArrayLike] = None,
83    ) -> None:
84        super().__init__(filter_order=int(filter_order), w_init=w_init)
85        self.step_size = float(step_size)
supports_complex: bool = True
step_size: float
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False) -> pydaptivefiltering.base.OptimizationResult:
 87    @validate_input
 88    def optimize(
 89        self,
 90        input_signal: np.ndarray,
 91        desired_signal: np.ndarray,
 92        verbose: bool = False,
 93    ) -> OptimizationResult:
 94        """
 95        Executes the LMS adaptation loop over paired input/desired sequences.
 96
 97        Parameters
 98        ----------
 99        input_signal : array_like of complex
100            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
101        desired_signal : array_like of complex
102            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
103        verbose : bool, optional
104            If True, prints the total runtime after completion.
105
106        Returns
107        -------
108        OptimizationResult
109            Result object with fields:
110            - outputs : ndarray of complex, shape ``(N,)``
111                Scalar output sequence, ``y[k] = w^H[k] x_k``.
112            - errors : ndarray of complex, shape ``(N,)``
113                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
114            - coefficients : ndarray of complex
115                Coefficient history recorded by the base class.
116            - error_type : str
117                Set to ``"a_priori"``.
118        """
119        tic: float = perf_counter()
120
121        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
122        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
123
124        n_samples: int = int(x.size)
125        m: int = int(self.filter_order)
126
127        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
128        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
129
130        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=complex)
131        x_padded[m:] = x
132
133        for k in range(n_samples):
134            x_k: np.ndarray = x_padded[k : k + m + 1][::-1]
135
136            y_k: complex = complex(np.vdot(self.w, x_k)) 
137            outputs[k] = y_k
138    
139            e_k: complex = d[k] - y_k
140            errors[k] = e_k
141
142            self.w = self.w + self.step_size * np.conj(e_k) * x_k
143
144            self._record_history()
145
146        runtime_s: float = perf_counter() - tic
147        if verbose:
148            print(f"[LMS] Completed in {runtime_s * 1000:.03f} ms")
149
150        return self._pack_results(
151            outputs=outputs,
152            errors=errors,
153            runtime_s=runtime_s,
154            error_type="a_priori",
155        )

Executes the LMS adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Scalar output sequence, y[k] = w^H[k] x_k. - errors : ndarray of complex, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori".

class NLMS(AdaptiveFilter):
    """
    Complex Normalized Least-Mean Squares (NLMS) adaptive filter (Diniz,
    Alg. 4.3).

    Normalizes the step size by the instantaneous regressor energy, improving
    stability and reducing sensitivity to input scaling.

    Parameters
    ----------
    filter_order : int
        FIR order ``M``; the filter carries ``M + 1`` coefficients.
    step_size : float, optional
        Base adaptation step ``mu``. Default is 1e-2.
    gamma : float, optional
        Regularization constant added to the normalization denominator to
        avoid division by (near-)zero regressor energy. Default is 1e-6.
    w_init : array_like of complex, optional
        Initial coefficients ``w(0)`` with shape ``(M + 1,)``; zeros when
        omitted.

    Notes
    -----
    With the regressor (newest sample first)

    .. math::
        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1},

    the a priori output and error are

    .. math::
        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].

    Using the instantaneous energy :math:`\\|x_k\\|^2 = x_k^H x_k`, the
    normalized step is

    .. math::
        \\mu_k = \\frac{\\mu}{\\|x_k\\|^2 + \\gamma},

    and the update reads

    .. math::
        w[k+1] = w[k] + \\mu_k\\, e^*[k] \\, x_k.

    This implementation uses complex arithmetic (``supports_complex=True``),
    returns the a priori error ``e[k]``, and records the coefficient history
    through the base class.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 4.3.
    """

    supports_complex: bool = True

    step_size: float
    gamma: float

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        gamma: float = 1e-6,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)
        self.gamma = float(gamma)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
    ) -> OptimizationResult:
        """
        Run the NLMS adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of complex, shape ``(N,)`` — scalar outputs
              ``y[k] = w^H[k] x_k``.
            - errors : ndarray of complex, shape ``(N,)`` — a priori errors
              ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of complex — history recorded by the
              base class.
            - error_type : str — set to ``"a_priori"``.
        """
        start = perf_counter()

        x_seq = np.asarray(input_signal, dtype=complex).ravel()
        d_seq = np.asarray(desired_signal, dtype=complex).ravel()

        n = int(x_seq.size)
        order = int(self.filter_order)

        y_out = np.zeros(n, dtype=complex)
        e_out = np.zeros(n, dtype=complex)

        # Zero-prefixed delay buffer so early regressors are well-defined.
        buf = np.zeros(n + order, dtype=complex)
        buf[order:] = x_seq

        for i in range(n):
            reg = buf[i : i + order + 1][::-1]

            y_i = complex(np.vdot(self.w, reg))
            e_i = d_seq[i] - y_i
            y_out[i] = y_i
            e_out[i] = e_i

            # Normalize the step by the instantaneous regressor energy.
            energy = float(np.vdot(reg, reg).real)
            mu_i = self.step_size / (energy + self.gamma)

            self.w = self.w + mu_i * np.conj(e_i) * reg
            self._record_history()

        runtime_s = perf_counter() - start
        if verbose:
            print(f"[NLMS] Completed in {runtime_s * 1000:.03f} ms")

        return self._pack_results(
            outputs=y_out,
            errors=e_out,
            runtime_s=runtime_s,
            error_type="a_priori",
        )

Complex Normalized Least-Mean Squares (NLMS) adaptive filter.

Normalized LMS algorithm for adaptive FIR filtering, following Diniz (Alg. 4.3). The method normalizes the step size by the instantaneous regressor energy to improve stability and reduce sensitivity to input scaling.

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. step_size : float, optional Base adaptation step size mu. Default is 1e-2. gamma : float, optional Regularization constant gamma used in the normalization denominator to avoid division by zero (or near-zero regressor energy). Default is 1e-6. w_init : array_like of complex, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

At iteration k, form the regressor vector (newest sample first):

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$

The a priori output and error are

$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$

Define the instantaneous regressor energy

$$\|x_k\|^2 = x_k^H x_k,$$

and the normalized step size

$$\mu_k = \frac{\mu}{\|x_k\|^2 + \gamma}.$$

The NLMS update is then

$$w[k+1] = w[k] + \mu_k\, e^*[k] \, x_k.$$

This implementation: - uses complex arithmetic (supports_complex=True), - returns the a priori error e[k], - records coefficient history via the base class.

References

  • P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 4.3.

NLMS( filter_order: int, step_size: float = 0.01, gamma: float = 1e-06, w_init: Union[numpy.ndarray, list, NoneType] = None)
 93    def __init__(
 94        self,
 95        filter_order: int,
 96        step_size: float = 1e-2,
 97        gamma: float = 1e-6,
 98        w_init: Optional[ArrayLike] = None,
 99    ) -> None:
100        super().__init__(filter_order=int(filter_order), w_init=w_init)
101        self.step_size = float(step_size)
102        self.gamma = float(gamma)
supports_complex: bool = True
step_size: float
gamma: float
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False) -> pydaptivefiltering.base.OptimizationResult:
104    @validate_input
105    def optimize(
106        self,
107        input_signal: np.ndarray,
108        desired_signal: np.ndarray,
109        verbose: bool = False,
110    ) -> OptimizationResult:
111        """
112        Executes the NLMS adaptation loop over paired input/desired sequences.
113
114        Parameters
115        ----------
116        input_signal : array_like of complex
117            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
118        desired_signal : array_like of complex
119            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
120        verbose : bool, optional
121            If True, prints the total runtime after completion.
122
123        Returns
124        -------
125        OptimizationResult
126            Result object with fields:
127            - outputs : ndarray of complex, shape ``(N,)``
128                Scalar output sequence, ``y[k] = w^H[k] x_k``.
129            - errors : ndarray of complex, shape ``(N,)``
130                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
131            - coefficients : ndarray of complex
132                Coefficient history recorded by the base class.
133            - error_type : str
134                Set to ``"a_priori"``.
135        """
136        tic: float = perf_counter()
137
138        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
139        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
140
141        n_samples: int = int(x.size)
142        m: int = int(self.filter_order)
143
144        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
145        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
146
147        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=complex)
148        x_padded[m:] = x
149
150        for k in range(n_samples):
151            x_k: np.ndarray = x_padded[k : k + m + 1][::-1]
152
153            y_k: complex = complex(np.vdot(self.w, x_k))
154            outputs[k] = y_k
155
156            e_k: complex = d[k] - y_k
157            errors[k] = e_k
158
159            norm_xk: float = float(np.vdot(x_k, x_k).real)
160            mu_k: float = self.step_size / (norm_xk + self.gamma)
161
162            self.w = self.w + mu_k * np.conj(e_k) * x_k
163
164            self._record_history()
165
166        runtime_s: float = perf_counter() - tic
167        if verbose:
168            print(f"[NLMS] Completed in {runtime_s * 1000:.03f} ms")
169
170        return self._pack_results(
171            outputs=outputs,
172            errors=errors,
173            runtime_s=runtime_s,
174            error_type="a_priori",
175        )

Executes the NLMS adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Scalar output sequence, y[k] = w^H[k] x_k. - errors : ndarray of complex, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori".

class AffineProjection(pydaptivefiltering.AdaptiveFilter):
 29class AffineProjection(AdaptiveFilter):
 30    """
 31    Complex Affine-Projection Algorithm (APA) adaptive filter.
 32
 33    Affine-projection LMS-type algorithm that reuses the last ``L+1`` regressor
 34    vectors to accelerate convergence relative to LMS/NLMS, following Diniz
 35    (Alg. 4.6). Per iteration, the method solves a small linear system of size
 36    ``(L+1) x (L+1)``.
 37
 38    Parameters
 39    ----------
 40    filter_order : int
 41        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 42    step_size : float, optional
 43        Adaptation step size (relaxation factor) ``mu``. Default is 1e-2.
 44    gamma : float, optional
 45        Diagonal loading (regularization) ``gamma`` applied to the projection
 46        correlation matrix for numerical stability. Default is 1e-6.
 47    L : int, optional
 48        Reuse factor (projection order). The algorithm uses ``L + 1`` most recent
 49        regressors. Default is 2.
 50    w_init : array_like of complex, optional
 51        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 52        initializes with zeros.
 53
 54    Notes
 55    -----
 56    At iteration ``k``, form the projection matrix and desired vector:
 57
 58    - ``X(k) ∈ C^{(L+1) x (M+1)}``, whose rows are regressor vectors, with the most
 59      recent regressor at row 0.
 60    - ``d_vec(k) ∈ C^{L+1}``, stacking the most recent desired samples, with
 61      ``d[k]`` at index 0.
 62
 63    The projection output and error vectors are:
 64
 65    .. math::
 66        y_{vec}(k) = X(k)\\,w^*(k) \\in \\mathbb{C}^{L+1},
 67
 68    .. math::
 69        e_{vec}(k) = d_{vec}(k) - y_{vec}(k).
 70
 71    The update direction ``u(k)`` is obtained by solving the regularized system:
 72
 73    .. math::
 74        (X(k)X^H(k) + \\gamma I_{L+1})\\,u(k) = e_{vec}(k),
 75
 76    and the coefficient update is:
 77
 78    .. math::
 79        w(k+1) = w(k) + \\mu X^H(k)\\,u(k).
 80
 81    This implementation returns only the *most recent* scalar components:
 82
 83    - ``y[k] = y_vec(k)[0]``
 84    - ``e[k] = e_vec(k)[0]``
 85
 86    References
 87    ----------
 88    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 89       Implementation*, 5th ed., Algorithm 4.6.
 90    """
 91
 92    supports_complex: bool = True
 93
 94    step_size: float
 95    gamma: float
 96    memory_length: int
 97
 98    def __init__(
 99        self,
100        filter_order: int,
101        step_size: float = 1e-2,
102        gamma: float = 1e-6,
103        L: int = 2,
104        w_init: Optional[ArrayLike] = None,
105    ) -> None:
106        super().__init__(filter_order=int(filter_order), w_init=w_init)
107        self.step_size = float(step_size)
108        self.gamma = float(gamma)
109        self.memory_length = int(L)
110
111    @validate_input
112    def optimize(
113        self,
114        input_signal: np.ndarray,
115        desired_signal: np.ndarray,
116        verbose: bool = False,
117        return_internal_states: bool = False,
118    ) -> OptimizationResult:
119        """
120        Executes the Affine Projection adaptation loop over paired input/desired sequences.
121
122        Parameters
123        ----------
124        input_signal : array_like of complex
125            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
126        desired_signal : array_like of complex
127            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
128        verbose : bool, optional
129            If True, prints the total runtime after completion.
130        return_internal_states : bool, optional
131            If True, includes the last internal states in ``result.extra``:
132            ``"last_regressor_matrix"`` (``X(k)``) and
133            ``"last_correlation_matrix"`` (``X(k)X^H(k) + gamma I``).
134
135        Returns
136        -------
137        OptimizationResult
138            Result object with fields:
139            - outputs : ndarray of complex, shape ``(N,)``
140                Scalar output sequence, ``y[k] = y_vec(k)[0]``.
141            - errors : ndarray of complex, shape ``(N,)``
142                Scalar a priori error sequence, ``e[k] = e_vec(k)[0]``.
143            - coefficients : ndarray of complex
144                Coefficient history recorded by the base class.
145            - error_type : str
146                Set to ``"a_priori"``.
147            - extra : dict, optional
148                Present only if ``return_internal_states=True``.
149        """
150        tic: float = perf_counter()
151
152        dtype = complex
153        x = np.asarray(input_signal, dtype=dtype).ravel()
154        d = np.asarray(desired_signal, dtype=dtype).ravel()
155        
156        n_samples: int = int(x.size)
157        m: int = int(self.filter_order)
158        L: int = int(self.memory_length)
159
160        outputs: np.ndarray = np.zeros(n_samples, dtype=dtype)
161        errors: np.ndarray = np.zeros(n_samples, dtype=dtype)
162
163        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=dtype)
164        x_padded[m:] = x
165
166        X_matrix: np.ndarray = np.zeros((L + 1, m + 1), dtype=dtype)
167        D_vector: np.ndarray = np.zeros(L + 1, dtype=dtype)
168
169        last_corr: Optional[np.ndarray] = None
170
171        eye_L: np.ndarray = np.eye(L + 1, dtype=dtype)
172
173        for k in range(n_samples):
174            X_matrix[1:] = X_matrix[:-1]
175            X_matrix[0] = x_padded[k : k + m + 1][::-1]
176
177            D_vector[1:] = D_vector[:-1]
178            D_vector[0] = d[k]
179
180            Y_vector: np.ndarray = X_matrix @ self.w.conj()
181            E_vector: np.ndarray = D_vector - Y_vector
182
183            outputs[k] = Y_vector[0]
184            errors[k] = E_vector[0]
185
186            corr_matrix: np.ndarray = (X_matrix @ X_matrix.conj().T) + (self.gamma * eye_L)
187            last_corr = corr_matrix
188
189            try:
190                u: np.ndarray = np.linalg.solve(corr_matrix, E_vector)
191            except np.linalg.LinAlgError:
192                u = np.linalg.pinv(corr_matrix) @ E_vector
193
194            self.w = self.w + self.step_size * (X_matrix.conj().T @ u)
195            self._record_history()
196
197        runtime_s: float = perf_counter() - tic
198        if verbose:
199            print(f"[AffineProjection] Completed in {runtime_s * 1000:.02f} ms")
200
201        extra = None
202        if return_internal_states:
203            extra = {
204                "last_regressor_matrix": X_matrix.copy(),
205                "last_correlation_matrix": None if last_corr is None else last_corr.copy(),
206            }
207
208        return self._pack_results(
209            outputs=outputs,
210            errors=errors,
211            runtime_s=runtime_s,
212            error_type="a_priori",
213            extra=extra,
214        )

Complex Affine-Projection Algorithm (APA) adaptive filter.

Affine-projection LMS-type algorithm that reuses the last L+1 regressor vectors to accelerate convergence relative to LMS/NLMS, following Diniz (Alg. 4.6). Per iteration, the method solves a small linear system of size (L+1) x (L+1).

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. step_size : float, optional Adaptation step size (relaxation factor) mu. Default is 1e-2. gamma : float, optional Diagonal loading (regularization) gamma applied to the projection correlation matrix for numerical stability. Default is 1e-6. L : int, optional Reuse factor (projection order). The algorithm uses L + 1 most recent regressors. Default is 2. w_init : array_like of complex, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

At iteration k, form the projection matrix and desired vector:

  • X(k) ∈ C^{(L+1) x (M+1)}, whose rows are regressor vectors, with the most recent regressor at row 0.
  • d_vec(k) ∈ C^{L+1}, stacking the most recent desired samples, with d[k] at index 0.

The projection output and error vectors are:

$$y_{vec}(k) = X(k)\,w^*(k) \in \mathbb{C}^{L+1},$$

$$e_{vec}(k) = d_{vec}(k) - y_{vec}(k).$$

The update direction u(k) is obtained by solving the regularized system:

$$(X(k)X^H(k) + \gamma I_{L+1})\,u(k) = e_{vec}(k),$$

and the coefficient update is:

$$w(k+1) = w(k) + \mu X^H(k)\,u(k).$$

This implementation returns only the most recent scalar components:

  • y[k] = y_vec(k)[0]
  • e[k] = e_vec(k)[0]

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 4.6.

AffineProjection( filter_order: int, step_size: float = 0.01, gamma: float = 1e-06, L: int = 2, w_init: Union[numpy.ndarray, list, NoneType] = None)
 98    def __init__(
 99        self,
100        filter_order: int,
101        step_size: float = 1e-2,
102        gamma: float = 1e-6,
103        L: int = 2,
104        w_init: Optional[ArrayLike] = None,
105    ) -> None:
106        super().__init__(filter_order=int(filter_order), w_init=w_init)
107        self.step_size = float(step_size)
108        self.gamma = float(gamma)
109        self.memory_length = int(L)
supports_complex: bool = True
step_size: float
gamma: float
memory_length: int
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
111    @validate_input
112    def optimize(
113        self,
114        input_signal: np.ndarray,
115        desired_signal: np.ndarray,
116        verbose: bool = False,
117        return_internal_states: bool = False,
118    ) -> OptimizationResult:
119        """
120        Executes the Affine Projection adaptation loop over paired input/desired sequences.
121
122        Parameters
123        ----------
124        input_signal : array_like of complex
125            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
126        desired_signal : array_like of complex
127            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
128        verbose : bool, optional
129            If True, prints the total runtime after completion.
130        return_internal_states : bool, optional
131            If True, includes the last internal states in ``result.extra``:
132            ``"last_regressor_matrix"`` (``X(k)``) and
133            ``"last_correlation_matrix"`` (``X(k)X^H(k) + gamma I``).
134
135        Returns
136        -------
137        OptimizationResult
138            Result object with fields:
139            - outputs : ndarray of complex, shape ``(N,)``
140                Scalar output sequence, ``y[k] = y_vec(k)[0]``.
141            - errors : ndarray of complex, shape ``(N,)``
142                Scalar a priori error sequence, ``e[k] = e_vec(k)[0]``.
143            - coefficients : ndarray of complex
144                Coefficient history recorded by the base class.
145            - error_type : str
146                Set to ``"a_priori"``.
147            - extra : dict, optional
148                Present only if ``return_internal_states=True``.
149        """
150        tic: float = perf_counter()
151
152        dtype = complex
153        x = np.asarray(input_signal, dtype=dtype).ravel()
154        d = np.asarray(desired_signal, dtype=dtype).ravel()
155        
156        n_samples: int = int(x.size)
157        m: int = int(self.filter_order)
158        L: int = int(self.memory_length)
159
160        outputs: np.ndarray = np.zeros(n_samples, dtype=dtype)
161        errors: np.ndarray = np.zeros(n_samples, dtype=dtype)
162
163        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=dtype)
164        x_padded[m:] = x
165
166        X_matrix: np.ndarray = np.zeros((L + 1, m + 1), dtype=dtype)
167        D_vector: np.ndarray = np.zeros(L + 1, dtype=dtype)
168
169        last_corr: Optional[np.ndarray] = None
170
171        eye_L: np.ndarray = np.eye(L + 1, dtype=dtype)
172
173        for k in range(n_samples):
174            X_matrix[1:] = X_matrix[:-1]
175            X_matrix[0] = x_padded[k : k + m + 1][::-1]
176
177            D_vector[1:] = D_vector[:-1]
178            D_vector[0] = d[k]
179
180            Y_vector: np.ndarray = X_matrix @ self.w.conj()
181            E_vector: np.ndarray = D_vector - Y_vector
182
183            outputs[k] = Y_vector[0]
184            errors[k] = E_vector[0]
185
186            corr_matrix: np.ndarray = (X_matrix @ X_matrix.conj().T) + (self.gamma * eye_L)
187            last_corr = corr_matrix
188
189            try:
190                u: np.ndarray = np.linalg.solve(corr_matrix, E_vector)
191            except np.linalg.LinAlgError:
192                u = np.linalg.pinv(corr_matrix) @ E_vector
193
194            self.w = self.w + self.step_size * (X_matrix.conj().T @ u)
195            self._record_history()
196
197        runtime_s: float = perf_counter() - tic
198        if verbose:
199            print(f"[AffineProjection] Completed in {runtime_s * 1000:.02f} ms")
200
201        extra = None
202        if return_internal_states:
203            extra = {
204                "last_regressor_matrix": X_matrix.copy(),
205                "last_correlation_matrix": None if last_corr is None else last_corr.copy(),
206            }
207
208        return self._pack_results(
209            outputs=outputs,
210            errors=errors,
211            runtime_s=runtime_s,
212            error_type="a_priori",
213            extra=extra,
214        )

Executes the Affine Projection adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes the last internal states in result.extra: "last_regressor_matrix" (X(k)) and "last_correlation_matrix" (X(k)X^H(k) + gamma I).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Scalar output sequence, y[k] = y_vec(k)[0]. - errors : ndarray of complex, shape (N,) Scalar a priori error sequence, e[k] = e_vec(k)[0]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True.

class SignData(pydaptivefiltering.AdaptiveFilter):
 28class SignData(AdaptiveFilter):
 29    """
 30    Complex Sign-Data LMS adaptive filter.
 31
 32    Low-complexity LMS variant in which the regressor vector is replaced by its
 33    element-wise sign. This reduces multiplications (since the update uses a
 34    ternary/sign regressor), at the expense of slower convergence and/or larger
 35    steady-state misadjustment in many scenarios.
 36
 37    Parameters
 38    ----------
 39    filter_order : int
 40        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 41    step_size : float, optional
 42        Adaptation step size ``mu``. Default is 1e-2.
 43    w_init : array_like of complex, optional
 44        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 45        initializes with zeros.
 46
 47    Notes
 48    -----
 49    At iteration ``k``, form the regressor vector (newest sample first):
 50
 51    .. math::
 52        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1}.
 53
 54    The a priori output and error are
 55
 56    .. math::
 57        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].
 58
 59    Define the element-wise sign regressor ``\\operatorname{sign}(x_k)``.
 60    The update implemented here is
 61
 62    .. math::
 63        w[k+1] = w[k] + 2\\mu\\, e^*[k] \\, \\operatorname{sign}(x_k).
 64
 65    Implementation details
 66        - For complex inputs, ``numpy.sign`` applies element-wise and returns
 67          ``x/|x|`` when ``x != 0`` and ``0`` when ``x == 0``.
 68        - The factor ``2`` in the update matches the implementation in this
 69          module (consistent with common LMS gradient conventions).
 70
 71    References
 72    ----------
 73    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 74       Implementation*, 5th ed., Algorithm 4.1 (sign-based LMS variants).
 75    """
 76
 77    supports_complex: bool = True
 78
 79    def __init__(
 80        self,
 81        filter_order: int,
 82        step_size: float = 1e-2,
 83        w_init: Optional[ArrayLike] = None,
 84    ) -> None:
 85        super().__init__(filter_order=int(filter_order), w_init=w_init)
 86        self.step_size = float(step_size)
 87
 88    @validate_input
 89    def optimize(
 90        self,
 91        input_signal: np.ndarray,
 92        desired_signal: np.ndarray,
 93        verbose: bool = False,
 94        return_internal_states: bool = False,
 95    ) -> OptimizationResult:
 96        """
 97        Executes the Sign-Data LMS adaptation loop over paired input/desired sequences.
 98
 99        Parameters
100        ----------
101        input_signal : array_like of complex
102            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
103        desired_signal : array_like of complex
104            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
105        verbose : bool, optional
106            If True, prints the total runtime after completion.
107        return_internal_states : bool, optional
108            If True, includes the last internal state in ``result.extra``:
109            ``"last_sign_regressor"`` (``sign(x_k)``).
110
111        Returns
112        -------
113        OptimizationResult
114            Result object with fields:
115            - outputs : ndarray of complex, shape ``(N,)``
116                Scalar output sequence, ``y[k] = w^H[k] x_k``.
117            - errors : ndarray of complex, shape ``(N,)``
118                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
119            - coefficients : ndarray of complex
120                Coefficient history recorded by the base class.
121            - error_type : str
122                Set to ``"a_priori"``.
123            - extra : dict, optional
124                Present only if ``return_internal_states=True``.
125        """
126        t0 = perf_counter()
127
128        x = np.asarray(input_signal, dtype=complex).ravel()
129        d = np.asarray(desired_signal, dtype=complex).ravel()
130
131        n_samples = int(x.size)
132        m = int(self.filter_order)
133
134        outputs = np.zeros(n_samples, dtype=complex)
135        errors = np.zeros(n_samples, dtype=complex)
136
137        x_padded = np.zeros(n_samples + m, dtype=complex)
138        x_padded[m:] = x
139
140        last_sign_xk: Optional[np.ndarray] = None
141
142        for k in range(n_samples):
143            x_k = x_padded[k : k + m + 1][::-1]
144
145            y_k = complex(np.vdot(self.w, x_k))
146            outputs[k] = y_k
147
148            e_k = d[k] - y_k
149            errors[k] = e_k
150
151            sign_xk = np.sign(x_k)
152            last_sign_xk = sign_xk
153
154            self.w = self.w + (2.0 * self.step_size) * np.conj(e_k) * sign_xk
155            self._record_history()
156
157        runtime_s = float(perf_counter() - t0)
158        if verbose:
159            print(f"[SignData] Completed in {runtime_s * 1000:.03f} ms")
160
161        extra: Optional[Dict[str, Any]] = None
162        if return_internal_states:
163            extra = {"last_sign_regressor": None if last_sign_xk is None else last_sign_xk.copy()}
164
165        return self._pack_results(
166            outputs=outputs,
167            errors=errors,
168            runtime_s=runtime_s,
169            error_type="a_priori",
170            extra=extra,
171        )

Complex Sign-Data LMS adaptive filter.

Low-complexity LMS variant in which the regressor vector is replaced by its element-wise sign. This reduces multiplications (since the update uses a ternary/sign regressor), at the expense of slower convergence and/or larger steady-state misadjustment in many scenarios.

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. step_size : float, optional Adaptation step size mu. Default is 1e-2. w_init : array_like of complex, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

At iteration k, form the regressor vector (newest sample first):

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$

The a priori output and error are

$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$

Define the element-wise sign regressor \operatorname{sign}(x_k). The update implemented here is

$$w[k+1] = w[k] + 2\mu\, e^*[k] \, \operatorname{sign}(x_k).$$

Implementation details - For complex inputs, numpy.sign applies element-wise and returns x/|x| when x != 0 and 0 when x == 0. - The factor 2 in the update matches the implementation in this module (consistent with common LMS gradient conventions).

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 4.1 (sign-based LMS variants).

SignData( filter_order: int, step_size: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None)
79    def __init__(
80        self,
81        filter_order: int,
82        step_size: float = 1e-2,
83        w_init: Optional[ArrayLike] = None,
84    ) -> None:
85        super().__init__(filter_order=int(filter_order), w_init=w_init)
86        self.step_size = float(step_size)
supports_complex: bool = True
step_size
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
 88    @validate_input
 89    def optimize(
 90        self,
 91        input_signal: np.ndarray,
 92        desired_signal: np.ndarray,
 93        verbose: bool = False,
 94        return_internal_states: bool = False,
 95    ) -> OptimizationResult:
 96        """
 97        Executes the Sign-Data LMS adaptation loop over paired input/desired sequences.
 98
 99        Parameters
100        ----------
101        input_signal : array_like of complex
102            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
103        desired_signal : array_like of complex
104            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
105        verbose : bool, optional
106            If True, prints the total runtime after completion.
107        return_internal_states : bool, optional
108            If True, includes the last internal state in ``result.extra``:
109            ``"last_sign_regressor"`` (``sign(x_k)``).
110
111        Returns
112        -------
113        OptimizationResult
114            Result object with fields:
115            - outputs : ndarray of complex, shape ``(N,)``
116                Scalar output sequence, ``y[k] = w^H[k] x_k``.
117            - errors : ndarray of complex, shape ``(N,)``
118                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
119            - coefficients : ndarray of complex
120                Coefficient history recorded by the base class.
121            - error_type : str
122                Set to ``"a_priori"``.
123            - extra : dict, optional
124                Present only if ``return_internal_states=True``.
125        """
126        t0 = perf_counter()
127
128        x = np.asarray(input_signal, dtype=complex).ravel()
129        d = np.asarray(desired_signal, dtype=complex).ravel()
130
131        n_samples = int(x.size)
132        m = int(self.filter_order)
133
134        outputs = np.zeros(n_samples, dtype=complex)
135        errors = np.zeros(n_samples, dtype=complex)
136
137        x_padded = np.zeros(n_samples + m, dtype=complex)
138        x_padded[m:] = x
139
140        last_sign_xk: Optional[np.ndarray] = None
141
142        for k in range(n_samples):
143            x_k = x_padded[k : k + m + 1][::-1]
144
145            y_k = complex(np.vdot(self.w, x_k))
146            outputs[k] = y_k
147
148            e_k = d[k] - y_k
149            errors[k] = e_k
150
151            sign_xk = np.sign(x_k)
152            last_sign_xk = sign_xk
153
154            self.w = self.w + (2.0 * self.step_size) * np.conj(e_k) * sign_xk
155            self._record_history()
156
157        runtime_s = float(perf_counter() - t0)
158        if verbose:
159            print(f"[SignData] Completed in {runtime_s * 1000:.03f} ms")
160
161        extra: Optional[Dict[str, Any]] = None
162        if return_internal_states:
163            extra = {"last_sign_regressor": None if last_sign_xk is None else last_sign_xk.copy()}
164
165        return self._pack_results(
166            outputs=outputs,
167            errors=errors,
168            runtime_s=runtime_s,
169            error_type="a_priori",
170            extra=extra,
171        )

Executes the Sign-Data LMS adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes the last internal state in result.extra: "last_sign_regressor" (sign(x_k)).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Scalar output sequence, y[k] = w^H[k] x_k. - errors : ndarray of complex, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True.

class SignError(pydaptivefiltering.AdaptiveFilter):
 29class SignError(AdaptiveFilter):
 30    """
 31    Sign-Error LMS adaptive filter (real-valued).
 32
 33    Low-complexity LMS variant that replaces the instantaneous error by its sign.
 34    This reduces multiplications and can improve robustness under impulsive noise
 35    in some scenarios, at the expense of slower convergence and/or larger
 36    steady-state misadjustment.
 37
 38    Parameters
 39    ----------
 40    filter_order : int
 41        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 42    step_size : float, optional
 43        Adaptation step size ``mu``. Default is 1e-2.
 44    w_init : array_like of float, optional
 45        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 46        initializes with zeros.
 47
 48    Notes
 49    -----
 50    Real-valued only
 51        This implementation is restricted to real-valued signals and coefficients
 52        (``supports_complex=False``). The constraint is enforced via
 53        ``@ensure_real_signals`` on :meth:`optimize`.
 54
 55    At iteration ``k``, form the regressor vector (newest sample first):
 56
 57    .. math::
 58        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{R}^{M+1}.
 59
 60    The a priori output and error are
 61
 62    .. math::
 63        y[k] = w^T[k] x_k, \\qquad e[k] = d[k] - y[k].
 64
 65    The sign-error update implemented here is
 66
 67    .. math::
 68        w[k+1] = w[k] + \\mu\\, \\operatorname{sign}(e[k])\\, x_k.
 69
 70    Implementation details
 71        - ``numpy.sign(0) = 0``; therefore if ``e[k] == 0`` the update is null.
 72
 73    References
 74    ----------
 75    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 76       Implementation*, 5th ed., Algorithm 4.1 (sign-based LMS variants).
 77    """
 78
 79    supports_complex: bool = False
 80    step_size: float
 81    def __init__(
 82        self,
 83        filter_order: int,
 84        step_size: float = 1e-2,
 85        w_init: Optional[ArrayLike] = None,
 86    ) -> None:
 87        super().__init__(filter_order=int(filter_order), w_init=w_init)
 88        self.step_size = float(step_size)
 89
 90    @validate_input
 91    @ensure_real_signals
 92    def optimize(
 93        self,
 94        input_signal: np.ndarray,
 95        desired_signal: np.ndarray,
 96        verbose: bool = False,
 97        return_internal_states: bool = False,
 98    ) -> OptimizationResult:
 99        """
100        Executes the Sign-Error LMS adaptation loop over paired input/desired sequences.
101
102        Parameters
103        ----------
104        input_signal : array_like of float
105            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
106        desired_signal : array_like of float
107            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
108        verbose : bool, optional
109            If True, prints the total runtime after completion.
110        return_internal_states : bool, optional
111            If True, includes the last internal state in ``result.extra``:
112            ``"last_sign_error"`` (``sign(e[k])``).
113
114        Returns
115        -------
116        OptimizationResult
117            Result object with fields:
118            - outputs : ndarray of float, shape ``(N,)``
119                Scalar output sequence, ``y[k] = w^T[k] x_k``.
120            - errors : ndarray of float, shape ``(N,)``
121                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
122            - coefficients : ndarray of float
123                Coefficient history recorded by the base class.
124            - error_type : str
125                Set to ``"a_priori"``.
126            - extra : dict, optional
127                Present only if ``return_internal_states=True``.
128        """
129        t0 = perf_counter()
130
131        x = np.asarray(input_signal, dtype=np.float64).ravel()
132        d = np.asarray(desired_signal, dtype=np.float64).ravel()
133
134        n_samples = int(x.size)
135        m = int(self.filter_order)
136
137        outputs = np.zeros(n_samples, dtype=np.float64)
138        errors = np.zeros(n_samples, dtype=np.float64)
139
140        x_padded = np.zeros(n_samples + m, dtype=np.float64)
141        x_padded[m:] = x
142
143        last_sign_e: Optional[float] = None
144
145        for k in range(n_samples):
146            x_k = x_padded[k : k + m + 1][::-1]
147
148            y_k = float(np.dot(self.w, x_k))
149            outputs[k] = y_k
150
151            e_k = float(d[k] - y_k)
152            errors[k] = e_k
153
154            s = float(np.sign(e_k))
155            last_sign_e = s
156
157            self.w = self.w + self.step_size * s * x_k
158            self._record_history()
159
160        runtime_s = float(perf_counter() - t0)
161        if verbose:
162            print(f"[SignError] Completed in {runtime_s * 1000:.03f} ms")
163
164        extra: Optional[Dict[str, Any]] = None
165        if return_internal_states:
166            extra = {"last_sign_error": last_sign_e}
167
168        return self._pack_results(
169            outputs=outputs,
170            errors=errors,
171            runtime_s=runtime_s,
172            error_type="a_priori",
173            extra=extra,
174        )

Sign-Error LMS adaptive filter (real-valued).

Low-complexity LMS variant that replaces the instantaneous error by its sign. This reduces multiplications and can improve robustness under impulsive noise in some scenarios, at the expense of slower convergence and/or larger steady-state misadjustment.

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. step_size : float, optional Adaptation step size mu. Default is 1e-2. w_init : array_like of float, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

Real-valued only This implementation is restricted to real-valued signals and coefficients (supports_complex=False). The constraint is enforced via @ensure_real_signals on optimize().

At iteration k, form the regressor vector (newest sample first):

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{R}^{M+1}.$$

The a priori output and error are

$$y[k] = w^T[k] x_k, \qquad e[k] = d[k] - y[k].$$

The sign-error update implemented here is

$$w[k+1] = w[k] + \mu\, \operatorname{sign}(e[k])\, x_k.$$

Implementation details - numpy.sign(0) = 0; therefore if e[k] == 0 the update is null.

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 4.1 (sign-based LMS variants).

SignError( filter_order: int, step_size: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None)
81    def __init__(
82        self,
83        filter_order: int,
84        step_size: float = 1e-2,
85        w_init: Optional[ArrayLike] = None,
86    ) -> None:
87        super().__init__(filter_order=int(filter_order), w_init=w_init)
88        self.step_size = float(step_size)
supports_complex: bool = False
step_size: float
@validate_input
@ensure_real_signals
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
 90    @validate_input
 91    @ensure_real_signals
 92    def optimize(
 93        self,
 94        input_signal: np.ndarray,
 95        desired_signal: np.ndarray,
 96        verbose: bool = False,
 97        return_internal_states: bool = False,
 98    ) -> OptimizationResult:
 99        """
100        Executes the Sign-Error LMS adaptation loop over paired input/desired sequences.
101
102        Parameters
103        ----------
104        input_signal : array_like of float
105            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
106        desired_signal : array_like of float
107            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
108        verbose : bool, optional
109            If True, prints the total runtime after completion.
110        return_internal_states : bool, optional
111            If True, includes the last internal state in ``result.extra``:
112            ``"last_sign_error"`` (``sign(e[k])``).
113
114        Returns
115        -------
116        OptimizationResult
117            Result object with fields:
118            - outputs : ndarray of float, shape ``(N,)``
119                Scalar output sequence, ``y[k] = w^T[k] x_k``.
120            - errors : ndarray of float, shape ``(N,)``
121                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
122            - coefficients : ndarray of float
123                Coefficient history recorded by the base class.
124            - error_type : str
125                Set to ``"a_priori"``.
126            - extra : dict, optional
127                Present only if ``return_internal_states=True``.
128        """
129        t0 = perf_counter()
130
131        x = np.asarray(input_signal, dtype=np.float64).ravel()
132        d = np.asarray(desired_signal, dtype=np.float64).ravel()
133
134        n_samples = int(x.size)
135        m = int(self.filter_order)
136
137        outputs = np.zeros(n_samples, dtype=np.float64)
138        errors = np.zeros(n_samples, dtype=np.float64)
139
140        x_padded = np.zeros(n_samples + m, dtype=np.float64)
141        x_padded[m:] = x
142
143        last_sign_e: Optional[float] = None
144
145        for k in range(n_samples):
146            x_k = x_padded[k : k + m + 1][::-1]
147
148            y_k = float(np.dot(self.w, x_k))
149            outputs[k] = y_k
150
151            e_k = float(d[k] - y_k)
152            errors[k] = e_k
153
154            s = float(np.sign(e_k))
155            last_sign_e = s
156
157            self.w = self.w + self.step_size * s * x_k
158            self._record_history()
159
160        runtime_s = float(perf_counter() - t0)
161        if verbose:
162            print(f"[SignError] Completed in {runtime_s * 1000:.03f} ms")
163
164        extra: Optional[Dict[str, Any]] = None
165        if return_internal_states:
166            extra = {"last_sign_error": last_sign_e}
167
168        return self._pack_results(
169            outputs=outputs,
170            errors=errors,
171            runtime_s=runtime_s,
172            error_type="a_priori",
173            extra=extra,
174        )

Executes the Sign-Error LMS adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of float Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of float Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes the last internal state in result.extra: "last_sign_error" (sign(e[k])).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of float, shape (N,) Scalar output sequence, y[k] = w^T[k] x_k. - errors : ndarray of float, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of float Coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True.

class DualSign(pydaptivefiltering.AdaptiveFilter):
 29class DualSign(AdaptiveFilter):
 30    """
 31    Dual-Sign LMS (DS-LMS) adaptive filter (real-valued).
 32
 33    Low-complexity LMS variant that uses the *sign* of the instantaneous error
 34    and a two-level (piecewise) effective gain selected by the error magnitude.
 35    This can reduce the number of multiplications and may improve robustness
 36    under impulsive noise in some scenarios, at the expense of larger steady-state
 37    misadjustment.
 38
 39    Parameters
 40    ----------
 41    filter_order : int
 42        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 43    rho : float
 44        Threshold ``rho`` applied to ``|e[k]|`` to select the gain level.
 45    gamma : float
 46        Gain multiplier applied when ``|e[k]| > rho`` (typically ``gamma > 1``).
 47    step : float, optional
 48        Adaptation step size ``mu``. Default is 1e-2.
 49    w_init : array_like of float, optional
 50        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 51        initializes with zeros.
 52    safe_eps : float, optional
 53        Small positive constant kept for API consistency across the library.
 54        (Not used by this implementation.) Default is 1e-12.
 55
 56    Notes
 57    -----
 58    Real-valued only
 59        This implementation is restricted to real-valued signals and coefficients
 60        (``supports_complex=False``). The constraint is enforced via
 61        ``@ensure_real_signals`` on :meth:`optimize`.
 62
 63    Update rule (as implemented)
 64        Let the regressor vector be
 65
 66        .. math::
 67            x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T,
 68
 69        with output and error
 70
 71        .. math::
 72            y[k] = w^T[k] x_k, \\qquad e[k] = d[k] - y[k].
 73
 74        Define the two-level signed term
 75
 76        .. math::
 77            u[k] =
 78            \\begin{cases}
 79                \\operatorname{sign}(e[k]), & |e[k]| \\le \\rho \\\\
 80                \\gamma\\,\\operatorname{sign}(e[k]), & |e[k]| > \\rho
 81            \\end{cases}
 82
 83        and update
 84
 85        .. math::
 86            w[k+1] = w[k] + 2\\mu\\,u[k]\,x_k.
 87
 88    Implementation details
 89        - ``numpy.sign(0) = 0``; therefore if ``e[k] == 0`` the update is null.
 90        - The factor ``2`` in the update matches the implementation in this
 91          module (consistent with common LMS gradient conventions).
 92
 93    References
 94    ----------
 95    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 96       Implementation*, 5th ed., Algorithm 4.1 (modified sign-based variant).
 97    """
 98
 99    supports_complex: bool = False
100
101    rho: float
102    gamma: float
103    step_size: float
104
105    def __init__(
106        self,
107        filter_order: int,
108        rho: float,
109        gamma: float,
110        step: float = 1e-2,
111        w_init: Optional[ArrayLike] = None,
112        *,
113        safe_eps: float = 1e-12,
114    ) -> None:
115        super().__init__(filter_order=int(filter_order), w_init=w_init)
116        self.rho = float(rho)
117        self.gamma = float(gamma)
118        self.step_size = float(step)
119        self._safe_eps = float(safe_eps)
120
121    @validate_input
122    @ensure_real_signals
123    def optimize(
124        self,
125        input_signal: np.ndarray,
126        desired_signal: np.ndarray,
127        verbose: bool = False,
128    ) -> OptimizationResult:
129        """
130        Executes the DS-LMS adaptation loop over paired input/desired sequences.
131
132        Parameters
133        ----------
134        input_signal : array_like of float
135            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
136        desired_signal : array_like of float
137            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
138        verbose : bool, optional
139            If True, prints the total runtime after completion.
140
141        Returns
142        -------
143        OptimizationResult
144            Result object with fields:
145            - outputs : ndarray of float, shape ``(N,)``
146                Scalar output sequence, ``y[k] = w^T[k] x_k``.
147            - errors : ndarray of float, shape ``(N,)``
148                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
149            - coefficients : ndarray of float
150                Coefficient history recorded by the base class.
151            - error_type : str
152                Set to ``"a_priori"``.
153        """
154        tic: float = perf_counter()
155
156        x: np.ndarray = np.asarray(input_signal, dtype=np.float64).ravel()
157        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64).ravel()
158
159        n_samples: int = int(x.size)
160        m: int = int(self.filter_order)
161
162        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
163        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
164
165        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=np.float64)
166        x_padded[m:] = x
167
168        for k in range(n_samples):
169            x_k: np.ndarray = x_padded[k : k + m + 1][::-1]
170
171            y_k: float = float(np.dot(self.w, x_k))
172            outputs[k] = y_k
173
174            e_k: float = float(d[k] - y_k)
175            errors[k] = e_k
176
177            s: float = float(np.sign(e_k))
178            if abs(e_k) > self.rho:
179                s *= self.gamma
180
181            self.w = self.w + (2.0 * self.step_size) * s * x_k
182            self._record_history()
183
184        runtime_s: float = perf_counter() - tic
185        if verbose:
186            print(f"[DualSign] Completed in {runtime_s * 1000:.03f} ms")
187
188        return self._pack_results(
189            outputs=outputs,
190            errors=errors,
191            runtime_s=runtime_s,
192            error_type="a_priori",
193        )

Dual-Sign LMS (DS-LMS) adaptive filter (real-valued).

Low-complexity LMS variant that uses the sign of the instantaneous error and a two-level (piecewise) effective gain selected by the error magnitude. This can reduce the number of multiplications and may improve robustness under impulsive noise in some scenarios, at the expense of larger steady-state misadjustment.

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. rho : float Threshold rho applied to |e[k]| to select the gain level. gamma : float Gain multiplier applied when |e[k]| > rho (typically gamma > 1). step : float, optional Adaptation step size mu. Default is 1e-2. w_init : array_like of float, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros. safe_eps : float, optional Small positive constant kept for API consistency across the library. (Not used by this implementation.) Default is 1e-12.

Notes

Real-valued only This implementation is restricted to real-valued signals and coefficients (supports_complex=False). The constraint is enforced via @ensure_real_signals on optimize().

Update rule (as implemented) Let the regressor vector be

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T,$$

with output and error

$$y[k] = w^T[k] x_k, \qquad e[k] = d[k] - y[k].$$

Define the two-level signed term

$$u[k] =
\begin{cases}
\operatorname{sign}(e[k]), & |e[k]| \le \rho \\
\gamma\,\operatorname{sign}(e[k]), & |e[k]| > \rho
\end{cases}$$

and update

$$w[k+1] = w[k] + 2\mu\,u[k]\,x_k.$$

Implementation details - numpy.sign(0) = 0; therefore if e[k] == 0 the update is null. - The factor 2 in the update matches the implementation in this module (consistent with common LMS gradient conventions).

References

[1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 4.1 (modified sign-based variant).

DualSign( filter_order: int, rho: float, gamma: float, step: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None, *, safe_eps: float = 1e-12)
105    def __init__(
106        self,
107        filter_order: int,
108        rho: float,
109        gamma: float,
110        step: float = 1e-2,
111        w_init: Optional[ArrayLike] = None,
112        *,
113        safe_eps: float = 1e-12,
114    ) -> None:
115        super().__init__(filter_order=int(filter_order), w_init=w_init)
116        self.rho = float(rho)
117        self.gamma = float(gamma)
118        self.step_size = float(step)
119        self._safe_eps = float(safe_eps)
supports_complex: bool = False
rho: float
gamma: float
step_size: float
@validate_input
@ensure_real_signals
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False) -> pydaptivefiltering.base.OptimizationResult:
121    @validate_input
122    @ensure_real_signals
123    def optimize(
124        self,
125        input_signal: np.ndarray,
126        desired_signal: np.ndarray,
127        verbose: bool = False,
128    ) -> OptimizationResult:
129        """
130        Executes the DS-LMS adaptation loop over paired input/desired sequences.
131
132        Parameters
133        ----------
134        input_signal : array_like of float
135            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
136        desired_signal : array_like of float
137            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
138        verbose : bool, optional
139            If True, prints the total runtime after completion.
140
141        Returns
142        -------
143        OptimizationResult
144            Result object with fields:
145            - outputs : ndarray of float, shape ``(N,)``
146                Scalar output sequence, ``y[k] = w^T[k] x_k``.
147            - errors : ndarray of float, shape ``(N,)``
148                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
149            - coefficients : ndarray of float
150                Coefficient history recorded by the base class.
151            - error_type : str
152                Set to ``"a_priori"``.
153        """
154        tic: float = perf_counter()
155
156        x: np.ndarray = np.asarray(input_signal, dtype=np.float64).ravel()
157        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64).ravel()
158
159        n_samples: int = int(x.size)
160        m: int = int(self.filter_order)
161
162        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
163        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
164
165        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=np.float64)
166        x_padded[m:] = x
167
168        for k in range(n_samples):
169            x_k: np.ndarray = x_padded[k : k + m + 1][::-1]
170
171            y_k: float = float(np.dot(self.w, x_k))
172            outputs[k] = y_k
173
174            e_k: float = float(d[k] - y_k)
175            errors[k] = e_k
176
177            s: float = float(np.sign(e_k))
178            if abs(e_k) > self.rho:
179                s *= self.gamma
180
181            self.w = self.w + (2.0 * self.step_size) * s * x_k
182            self._record_history()
183
184        runtime_s: float = perf_counter() - tic
185        if verbose:
186            print(f"[DualSign] Completed in {runtime_s * 1000:.03f} ms")
187
188        return self._pack_results(
189            outputs=outputs,
190            errors=errors,
191            runtime_s=runtime_s,
192            error_type="a_priori",
193        )

Executes the DS-LMS adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of float Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of float Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of float, shape (N,) Scalar output sequence, y[k] = w^T[k] x_k. - errors : ndarray of float, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of float Coefficient history recorded by the base class. - error_type : str Set to "a_priori".

class LMSNewton(pydaptivefiltering.AdaptiveFilter):
 28class LMSNewton(AdaptiveFilter):
 29    """
 30    Complex LMS-Newton adaptive filter.
 31
 32    LMS-Newton accelerates the standard complex LMS by preconditioning the
 33    instantaneous gradient with a recursive estimate of the inverse input
 34    correlation matrix. This often improves convergence speed for strongly
 35    correlated inputs, at the cost of maintaining and updating a full
 36    ``(M+1) x (M+1)`` matrix per iteration.
 37
 38    Parameters
 39    ----------
 40    filter_order : int
 41        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 42    alpha : float
 43        Forgetting factor ``alpha`` used in the inverse-correlation recursion,
 44        with ``0 < alpha < 1``. Values closer to 1 yield smoother tracking; smaller
 45        values adapt faster.
 46    initial_inv_rx : array_like of complex
 47        Initial inverse correlation matrix ``P(0)`` with shape ``(M + 1, M + 1)``.
 48        Typical choices are scaled identities, e.g. ``delta^{-1} I``.
 49    step : float, optional
 50        Adaptation step size ``mu``. Default is 1e-2.
 51    w_init : array_like of complex, optional
 52        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 53        initializes with zeros.
 54    safe_eps : float, optional
 55        Small positive constant used to guard denominators in the matrix recursion.
 56        Default is 1e-12.
 57
 58    Notes
 59    -----
 60    Complex-valued
 61        This implementation assumes complex arithmetic (``supports_complex=True``),
 62        with the a priori output computed as ``y[k] = w^H[k] x_k``.
 63
 64    Recursion (as implemented)
 65        Let the regressor vector be
 66
 67        .. math::
 68            x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1},
 69
 70        and define the output and a priori error as
 71
 72        .. math::
 73            y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].
 74
 75        Maintain an estimate ``P[k] \\approx R_x^{-1}`` using a normalized rank-1 update.
 76        With
 77
 78        .. math::
 79            p_k = P[k] x_k, \\qquad \\phi_k = x_k^H p_k,
 80
 81        the denominator is
 82
 83        .. math::
 84            \\mathrm{denom}_k = \\frac{1-\\alpha}{\\alpha} + \\phi_k,
 85
 86        and the update used here is
 87
 88        .. math::
 89            P[k+1] =
 90            \\frac{1}{1-\\alpha}
 91            \\left(
 92                P[k] - \\frac{p_k p_k^H}{\\mathrm{denom}_k}
 93            \\right).
 94
 95        The coefficient update uses the preconditioned regressor ``P[k+1] x_k``:
 96
 97        .. math::
 98            w[k+1] = w[k] + \\mu\\, e^*[k] \\, (P[k+1] x_k).
 99
100    Relationship to RLS
101        The recursion for ``P`` is algebraically similar to an RLS covariance update
102        with a particular normalization; however, the coefficient update remains
103        LMS-like, controlled by the step size ``mu``.
104
105    References
106    ----------
107    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
108       Implementation*, 5th ed., Algorithm 4.2.
109    """
110
111    supports_complex: bool = True
112
113    alpha: float
114    step_size: float
115    inv_rx: np.ndarray
116
117    def __init__(
118        self,
119        filter_order: int,
120        alpha: float,
121        initial_inv_rx: np.ndarray,
122        step: float = 1e-2,
123        w_init: Optional[ArrayLike] = None,
124        *,
125        safe_eps: float = 1e-12,
126    ) -> None:
127        super().__init__(filter_order=int(filter_order), w_init=w_init)
128
129        self.alpha = float(alpha)
130        if not (0.0 < self.alpha < 1.0):
131            raise ValueError(f"alpha must satisfy 0 < alpha < 1. Got alpha={self.alpha}.")
132
133        P0 = np.asarray(initial_inv_rx, dtype=complex)
134        n_taps = int(filter_order) + 1
135        if P0.shape != (n_taps, n_taps):
136            raise ValueError(
137                f"initial_inv_rx must have shape {(n_taps, n_taps)}. Got {P0.shape}."
138            )
139        self.inv_rx = P0
140
141        self.step_size = float(step)
142        self._safe_eps = float(safe_eps)
143
144    @validate_input
145    def optimize(
146        self,
147        input_signal: np.ndarray,
148        desired_signal: np.ndarray,
149        verbose: bool = False,
150    ) -> OptimizationResult:
151        """
152        Executes the LMS-Newton adaptation loop over paired input/desired sequences.
153
154        Parameters
155        ----------
156        input_signal : array_like of complex
157            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
158        desired_signal : array_like of complex
159            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
160        verbose : bool, optional
161            If True, prints the total runtime after completion.
162
163        Returns
164        -------
165        OptimizationResult
166            Result object with fields:
167            - outputs : ndarray of complex, shape ``(N,)``
168                Scalar output sequence, ``y[k] = w^H[k] x_k``.
169            - errors : ndarray of complex, shape ``(N,)``
170                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
171            - coefficients : ndarray of complex
172                Coefficient history recorded by the base class.
173            - error_type : str
174                Set to ``"a_priori"``.
175        """
176        tic: float = perf_counter()
177
178        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
179        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
180
181        n_samples: int = int(x.size)
182        m: int = int(self.filter_order)
183
184        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
185        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
186
187        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=complex)
188        x_padded[m:] = x
189
190        for k in range(n_samples):
191            x_k: np.ndarray = x_padded[k : k + m + 1][::-1]
192
193            y_k: complex = complex(np.vdot(self.w, x_k))
194            outputs[k] = y_k
195
196            e_k: complex = d[k] - y_k
197            errors[k] = e_k
198
199            x_col: np.ndarray = x_k.reshape(-1, 1)
200            Px: np.ndarray = self.inv_rx @ x_col
201            phi: complex = (x_col.conj().T @ Px).item()
202
203            denom: complex = ((1.0 - self.alpha) / self.alpha) + phi
204            if abs(denom) < self._safe_eps:
205                denom = denom + (self._safe_eps + 0.0j)
206
207            self.inv_rx = (self.inv_rx - (Px @ Px.conj().T) / denom) / (1.0 - self.alpha)
208
209            self.w = self.w + self.step_size * np.conj(e_k) * Px.ravel()
210
211            self._record_history()
212
213        runtime_s: float = perf_counter() - tic
214        if verbose:
215            print(f"[LMSNewton] Completed in {runtime_s * 1000:.03f} ms")
216
217        return self._pack_results(
218            outputs=outputs,
219            errors=errors,
220            runtime_s=runtime_s,
221            error_type="a_priori",
222        )

Complex LMS-Newton adaptive filter.

LMS-Newton accelerates the standard complex LMS by preconditioning the instantaneous gradient with a recursive estimate of the inverse input correlation matrix. This often improves convergence speed for strongly correlated inputs, at the cost of maintaining and updating a full (M+1) x (M+1) matrix per iteration.

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. alpha : float Forgetting factor alpha used in the inverse-correlation recursion, with 0 < alpha < 1. Values closer to 1 yield smoother tracking; smaller values adapt faster. initial_inv_rx : array_like of complex Initial inverse correlation matrix P(0) with shape (M + 1, M + 1). Typical choices are scaled identities, e.g. delta^{-1} I. step : float, optional Adaptation step size mu. Default is 1e-2. w_init : array_like of complex, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros. safe_eps : float, optional Small positive constant used to guard denominators in the matrix recursion. Default is 1e-12.

Notes

Complex-valued This implementation assumes complex arithmetic (supports_complex=True), with the a priori output computed as y[k] = w^H[k] x_k.

Recursion (as implemented) Let the regressor vector be

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1},$$

and define the output and a priori error as

$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$

Maintain an estimate $P[k] \approx R_x^{-1}$ using a normalized rank-1 update. With

$$p_k = P[k] x_k, \qquad \phi_k = x_k^H p_k,$$

the denominator is

$$\mathrm{denom}_k = \frac{1-\alpha}{\alpha} + \phi_k,$$

and the update used here is

$$P[k+1] = \frac{1}{1-\alpha} \left( P[k] - \frac{p_k p_k^H}{\mathrm{denom}_k} \right).$$

The coefficient update uses the preconditioned regressor $P[k+1] x_k$:

$$w[k+1] = w[k] + \mu\, e^*[k] \, (P[k+1] x_k).$$

Relationship to RLS The recursion for P is algebraically similar to an RLS covariance update with a particular normalization; however, the coefficient update remains LMS-like, controlled by the step size mu.

References

[1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 4.2.

LMSNewton( filter_order: int, alpha: float, initial_inv_rx: numpy.ndarray, step: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None, *, safe_eps: float = 1e-12)
117    def __init__(
118        self,
119        filter_order: int,
120        alpha: float,
121        initial_inv_rx: np.ndarray,
122        step: float = 1e-2,
123        w_init: Optional[ArrayLike] = None,
124        *,
125        safe_eps: float = 1e-12,
126    ) -> None:
127        super().__init__(filter_order=int(filter_order), w_init=w_init)
128
129        self.alpha = float(alpha)
130        if not (0.0 < self.alpha < 1.0):
131            raise ValueError(f"alpha must satisfy 0 < alpha < 1. Got alpha={self.alpha}.")
132
133        P0 = np.asarray(initial_inv_rx, dtype=complex)
134        n_taps = int(filter_order) + 1
135        if P0.shape != (n_taps, n_taps):
136            raise ValueError(
137                f"initial_inv_rx must have shape {(n_taps, n_taps)}. Got {P0.shape}."
138            )
139        self.inv_rx = P0
140
141        self.step_size = float(step)
142        self._safe_eps = float(safe_eps)
supports_complex: bool = True
alpha: float
step_size: float
inv_rx: numpy.ndarray
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False) -> pydaptivefiltering.base.OptimizationResult:
144    @validate_input
145    def optimize(
146        self,
147        input_signal: np.ndarray,
148        desired_signal: np.ndarray,
149        verbose: bool = False,
150    ) -> OptimizationResult:
151        """
152        Executes the LMS-Newton adaptation loop over paired input/desired sequences.
153
154        Parameters
155        ----------
156        input_signal : array_like of complex
157            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
158        desired_signal : array_like of complex
159            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
160        verbose : bool, optional
161            If True, prints the total runtime after completion.
162
163        Returns
164        -------
165        OptimizationResult
166            Result object with fields:
167            - outputs : ndarray of complex, shape ``(N,)``
168                Scalar output sequence, ``y[k] = w^H[k] x_k``.
169            - errors : ndarray of complex, shape ``(N,)``
170                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
171            - coefficients : ndarray of complex
172                Coefficient history recorded by the base class.
173            - error_type : str
174                Set to ``"a_priori"``.
175        """
176        tic: float = perf_counter()
177
178        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
179        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
180
181        n_samples: int = int(x.size)
182        m: int = int(self.filter_order)
183
184        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
185        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
186
187        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=complex)
188        x_padded[m:] = x
189
190        for k in range(n_samples):
191            x_k: np.ndarray = x_padded[k : k + m + 1][::-1]
192
193            y_k: complex = complex(np.vdot(self.w, x_k))
194            outputs[k] = y_k
195
196            e_k: complex = d[k] - y_k
197            errors[k] = e_k
198
199            x_col: np.ndarray = x_k.reshape(-1, 1)
200            Px: np.ndarray = self.inv_rx @ x_col
201            phi: complex = (x_col.conj().T @ Px).item()
202
203            denom: complex = ((1.0 - self.alpha) / self.alpha) + phi
204            if abs(denom) < self._safe_eps:
205                denom = denom + (self._safe_eps + 0.0j)
206
207            self.inv_rx = (self.inv_rx - (Px @ Px.conj().T) / denom) / (1.0 - self.alpha)
208
209            self.w = self.w + self.step_size * np.conj(e_k) * Px.ravel()
210
211            self._record_history()
212
213        runtime_s: float = perf_counter() - tic
214        if verbose:
215            print(f"[LMSNewton] Completed in {runtime_s * 1000:.03f} ms")
216
217        return self._pack_results(
218            outputs=outputs,
219            errors=errors,
220            runtime_s=runtime_s,
221            error_type="a_priori",
222        )

Executes the LMS-Newton adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Scalar output sequence, y[k] = w^H[k] x_k. - errors : ndarray of complex, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori".

class Power2ErrorLMS(pydaptivefiltering.AdaptiveFilter):
 29class Power2ErrorLMS(AdaptiveFilter):
 30    """
 31    Power-of-Two Error LMS adaptive filter (real-valued).
 32
 33    LMS variant in which the instantaneous a priori error is quantized to a
 34    power-of-two level (with special cases for large and very small errors),
 35    aiming to reduce computational complexity in fixed-point / low-cost
 36    implementations.
 37
 38    Parameters
 39    ----------
 40    filter_order : int
 41        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 42    bd : int
 43        Word length (number of bits) used to define the small-error threshold
 44        ``2^{-bd+1}``.
 45    tau : float
 46        Gain factor applied when ``|e[k]|`` is very small (below ``2^{-bd+1}``).
 47    step_size : float, optional
 48        Adaptation step size ``mu``. Default is 1e-2.
 49    w_init : array_like of float, optional
 50        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 51        initializes with zeros.
 52
 53    Notes
 54    -----
 55    Real-valued only
 56        This implementation is restricted to real-valued signals and coefficients
 57        (``supports_complex=False``). The constraint is enforced via
 58        ``@ensure_real_signals`` on :meth:`optimize`.
 59
 60    Signal model and LMS update
 61        Let the regressor vector be
 62
 63        .. math::
 64            x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{R}^{M+1},
 65
 66        with output and a priori error
 67
 68        .. math::
 69            y[k] = w^T[k] x_k, \\qquad e[k] = d[k] - y[k].
 70
 71        The update uses a quantized error ``q(e[k])``:
 72
 73        .. math::
 74            w[k+1] = w[k] + 2\\mu\\, q(e[k])\\, x_k.
 75
 76    Error quantization (as implemented)
 77        Define the small-error threshold
 78
 79        .. math::
 80            \\epsilon = 2^{-bd+1}.
 81
 82        Then the quantizer is
 83
 84        .. math::
 85            q(e) =
 86            \\begin{cases}
 87                \\operatorname{sign}(e), & |e| \\ge 1, \\\\
 88                \\tau\\,\\operatorname{sign}(e), & |e| < \\epsilon, \\\\
 89                2^{\\lfloor \\log_2(|e|) \\rfloor}\\,\\operatorname{sign}(e),
 90                & \\text{otherwise.}
 91            \\end{cases}
 92
 93        Note that ``numpy.sign(0) = 0``; therefore if ``e[k] == 0`` then
 94        ``q(e[k]) = 0`` and the update is null.
 95
 96    References
 97    ----------
 98    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 99       Implementation*, 5th ed., Algorithm 4.1 (modified complexity-reduced LMS variants).
100    """
101
102    supports_complex: bool = False
103
104    def __init__(
105        self,
106        filter_order: int,
107        bd: int,
108        tau: float,
109        step_size: float = 1e-2,
110        w_init: Optional[ArrayLike] = None,
111    ) -> None:
112        super().__init__(filter_order=int(filter_order), w_init=w_init)
113        self.bd = int(bd)
114        self.tau = float(tau)
115        self.step_size = float(step_size)
116
117        if self.bd <= 0:
118            raise ValueError(f"bd must be a positive integer. Got bd={self.bd}.")
119
120    @validate_input
121    @ensure_real_signals
122    def optimize(
123        self,
124        input_signal: np.ndarray,
125        desired_signal: np.ndarray,
126        verbose: bool = False,
127        return_internal_states: bool = False,
128    ) -> OptimizationResult:
129        """
130        Executes the Power-of-Two Error LMS adaptation loop over paired sequences.
131
132        Parameters
133        ----------
134        input_signal : array_like of float
135            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
136        desired_signal : array_like of float
137            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
138        verbose : bool, optional
139            If True, prints the total runtime after completion.
140        return_internal_states : bool, optional
141            If True, includes the last internal states in ``result.extra``:
142            ``"last_quantized_error"`` (``q(e[k])``) and ``"small_threshold"``
143            (``2^{-bd+1}``).
144
145        Returns
146        -------
147        OptimizationResult
148            Result object with fields:
149            - outputs : ndarray of float, shape ``(N,)``
150                Scalar output sequence, ``y[k] = w^T[k] x_k``.
151            - errors : ndarray of float, shape ``(N,)``
152                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
153            - coefficients : ndarray of float
154                Coefficient history recorded by the base class.
155            - error_type : str
156                Set to ``"a_priori"``.
157            - extra : dict, optional
158                Present only if ``return_internal_states=True``.
159        """
160        t0 = perf_counter()
161
162        x = np.asarray(input_signal, dtype=np.float64).ravel()
163        d = np.asarray(desired_signal, dtype=np.float64).ravel()
164
165        n_samples = int(x.size)
166        m = int(self.filter_order)
167
168        outputs = np.zeros(n_samples, dtype=np.float64)
169        errors = np.zeros(n_samples, dtype=np.float64)
170
171        x_padded = np.zeros(n_samples + m, dtype=np.float64)
172        x_padded[m:] = x
173
174        last_qe: Optional[float] = None
175        small_thr = 2.0 ** (-self.bd + 1)
176
177        for k in range(n_samples):
178            x_k = x_padded[k : k + m + 1][::-1]
179
180            y_k = float(np.dot(self.w, x_k))
181            outputs[k] = y_k
182
183            e_k = float(d[k] - y_k)
184            errors[k] = e_k
185
186            abs_error = abs(e_k)
187            if abs_error >= 1.0:
188                qe = float(np.sign(e_k))
189            elif abs_error < small_thr:
190                qe = float(self.tau * np.sign(e_k))
191            else:
192                qe = float((2.0 ** np.floor(np.log2(abs_error))) * np.sign(e_k))
193
194            last_qe = qe
195
196            self.w = self.w + (2.0 * self.step_size) * qe * x_k
197            self._record_history()
198
199        runtime_s = float(perf_counter() - t0)
200        if verbose:
201            print(f"[Power2ErrorLMS] Completed in {runtime_s * 1000:.03f} ms")
202
203        extra: Optional[Dict[str, Any]] = None
204        if return_internal_states:
205            extra = {"last_quantized_error": last_qe, "small_threshold": float(small_thr)}
206
207        return self._pack_results(
208            outputs=outputs,
209            errors=errors,
210            runtime_s=runtime_s,
211            error_type="a_priori",
212            extra=extra,
213        )

Power-of-Two Error LMS adaptive filter (real-valued).

LMS variant in which the instantaneous a priori error is quantized to a power-of-two level (with special cases for large and very small errors), aiming to reduce computational complexity in fixed-point / low-cost implementations.

Parameters

filter_order : int
    Adaptive FIR filter order M. The number of coefficients is M + 1.
bd : int
    Word length (number of bits) used to define the small-error threshold 2^{-bd+1}.
tau : float
    Gain factor applied when |e[k]| is very small (below 2^{-bd+1}).
step_size : float, optional
    Adaptation step size mu. Default is 1e-2.
w_init : array_like of float, optional
    Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

Real-valued only: This implementation is restricted to real-valued signals and coefficients (supports_complex=False). The constraint is enforced via @ensure_real_signals on optimize().

Signal model and LMS update: Let the regressor vector be

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{R}^{M+1},$$

with output and a priori error

$$y[k] = w^T[k] x_k, \qquad e[k] = d[k] - y[k].$$

The update uses a quantized error ``q(e[k])``:

$$w[k+1] = w[k] + 2\mu\, q(e[k])\, x_k.$$

Error quantization (as implemented): Define the small-error threshold

$$\epsilon = 2^{-bd+1}.$$

Then the quantizer is

$$q(e) = \begin{cases} \operatorname{sign}(e), & |e| \ge 1, \\ \tau\,\operatorname{sign}(e), & |e| < \epsilon, \\ 2^{\lfloor \log_2(|e|) \rfloor}\,\operatorname{sign}(e), & \text{otherwise.} \end{cases}$$

Note that ``numpy.sign(0) = 0``; therefore if ``e[k] == 0`` then
``q(e[k]) = 0`` and the update is null.

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 4.1 (modified complexity-reduced LMS variants).

Power2ErrorLMS( filter_order: int, bd: int, tau: float, step_size: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None)
104    def __init__(
105        self,
106        filter_order: int,
107        bd: int,
108        tau: float,
109        step_size: float = 1e-2,
110        w_init: Optional[ArrayLike] = None,
111    ) -> None:
112        super().__init__(filter_order=int(filter_order), w_init=w_init)
113        self.bd = int(bd)
114        self.tau = float(tau)
115        self.step_size = float(step_size)
116
117        if self.bd <= 0:
118            raise ValueError(f"bd must be a positive integer. Got bd={self.bd}.")
supports_complex: bool = False
bd
tau
step_size
@validate_input
@ensure_real_signals
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
120    @validate_input
121    @ensure_real_signals
122    def optimize(
123        self,
124        input_signal: np.ndarray,
125        desired_signal: np.ndarray,
126        verbose: bool = False,
127        return_internal_states: bool = False,
128    ) -> OptimizationResult:
129        """
130        Executes the Power-of-Two Error LMS adaptation loop over paired sequences.
131
132        Parameters
133        ----------
134        input_signal : array_like of float
135            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
136        desired_signal : array_like of float
137            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
138        verbose : bool, optional
139            If True, prints the total runtime after completion.
140        return_internal_states : bool, optional
141            If True, includes the last internal states in ``result.extra``:
142            ``"last_quantized_error"`` (``q(e[k])``) and ``"small_threshold"``
143            (``2^{-bd+1}``).
144
145        Returns
146        -------
147        OptimizationResult
148            Result object with fields:
149            - outputs : ndarray of float, shape ``(N,)``
150                Scalar output sequence, ``y[k] = w^T[k] x_k``.
151            - errors : ndarray of float, shape ``(N,)``
152                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
153            - coefficients : ndarray of float
154                Coefficient history recorded by the base class.
155            - error_type : str
156                Set to ``"a_priori"``.
157            - extra : dict, optional
158                Present only if ``return_internal_states=True``.
159        """
160        t0 = perf_counter()
161
162        x = np.asarray(input_signal, dtype=np.float64).ravel()
163        d = np.asarray(desired_signal, dtype=np.float64).ravel()
164
165        n_samples = int(x.size)
166        m = int(self.filter_order)
167
168        outputs = np.zeros(n_samples, dtype=np.float64)
169        errors = np.zeros(n_samples, dtype=np.float64)
170
171        x_padded = np.zeros(n_samples + m, dtype=np.float64)
172        x_padded[m:] = x
173
174        last_qe: Optional[float] = None
175        small_thr = 2.0 ** (-self.bd + 1)
176
177        for k in range(n_samples):
178            x_k = x_padded[k : k + m + 1][::-1]
179
180            y_k = float(np.dot(self.w, x_k))
181            outputs[k] = y_k
182
183            e_k = float(d[k] - y_k)
184            errors[k] = e_k
185
186            abs_error = abs(e_k)
187            if abs_error >= 1.0:
188                qe = float(np.sign(e_k))
189            elif abs_error < small_thr:
190                qe = float(self.tau * np.sign(e_k))
191            else:
192                qe = float((2.0 ** np.floor(np.log2(abs_error))) * np.sign(e_k))
193
194            last_qe = qe
195
196            self.w = self.w + (2.0 * self.step_size) * qe * x_k
197            self._record_history()
198
199        runtime_s = float(perf_counter() - t0)
200        if verbose:
201            print(f"[Power2ErrorLMS] Completed in {runtime_s * 1000:.03f} ms")
202
203        extra: Optional[Dict[str, Any]] = None
204        if return_internal_states:
205            extra = {"last_quantized_error": last_qe, "small_threshold": float(small_thr)}
206
207        return self._pack_results(
208            outputs=outputs,
209            errors=errors,
210            runtime_s=runtime_s,
211            error_type="a_priori",
212            extra=extra,
213        )

Executes the Power-of-Two Error LMS adaptation loop over paired sequences.

Parameters

input_signal : array_like of float
    Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float
    Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
    If True, prints the total runtime after completion.
return_internal_states : bool, optional
    If True, includes the last internal states in result.extra:
    "last_quantized_error" (q(e[k])) and "small_threshold" (2^{-bd+1}).

Returns

OptimizationResult
    Result object with fields:
    - outputs : ndarray of float, shape (N,)
      Scalar output sequence, y[k] = w^T[k] x_k.
    - errors : ndarray of float, shape (N,)
      Scalar a priori error sequence, e[k] = d[k] - y[k].
    - coefficients : ndarray of float
      Coefficient history recorded by the base class.
    - error_type : str
      Set to "a_priori".
    - extra : dict, optional
      Present only if return_internal_states=True.

class TDomainLMS(pydaptivefiltering.AdaptiveFilter):
 28class TDomainLMS(AdaptiveFilter):
 29    """
 30    Transform-Domain LMS with a user-provided transform matrix.
 31
 32    Generic transform-domain LMS algorithm (Diniz, Alg. 4.4) parameterized by a
 33    transform matrix ``T``. At each iteration, the time-domain regressor is
 34    mapped to the transform domain, adaptation is performed with per-bin
 35    normalization using a smoothed power estimate, and time-domain coefficients
 36    are recovered from the transform-domain weights.
 37
 38    Parameters
 39    ----------
 40    filter_order : int
 41        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 42        The transform size must be ``(M + 1, M + 1)``.
 43    gamma : float
 44        Regularization factor ``gamma`` used in the per-bin normalization
 45        denominator to avoid division by zero (or near-zero power).
 46    alpha : float
 47        Smoothing factor ``alpha`` for the transform-bin power estimate,
 48        typically close to 1.
 49    initial_power : float
 50        Initial power estimate used to initialize all transform bins.
 51    transform_matrix : array_like of complex
 52        Transform matrix ``T`` with shape ``(M + 1, M + 1)``.
 53        Typically unitary (``T^H T = I``).
 54    step_size : float, optional
 55        Adaptation step size ``mu``. Default is 1e-2.
 56    w_init : array_like of complex, optional
 57        Initial **time-domain** coefficient vector ``w(0)`` with shape ``(M + 1,)``.
 58        If None, initializes with zeros.
 59    assume_unitary : bool, optional
 60        If True (default), maps transform-domain weights back to the time domain
 61        using ``w = T^H w_T`` (fast). If False, uses a pseudo-inverse mapping
 62        ``w = pinv(T)^H w_T`` (slower but works for non-unitary ``T``).
 63
 64    Notes
 65    -----
 66    At iteration ``k``, form the time-domain regressor vector (newest sample first):
 67
 68    .. math::
 69        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1}.
 70
 71    Define the transform-domain regressor:
 72
 73    .. math::
 74        z_k = T x_k.
 75
 76    Adaptation is performed in the transform domain with weights ``w_T[k]``.
 77    The a priori output and error are
 78
 79    .. math::
 80        y[k] = w_T^H[k] z_k, \\qquad e[k] = d[k] - y[k].
 81
 82    A smoothed per-bin power estimate ``p[k]`` is updated as
 83
 84    .. math::
 85        p[k] = \\alpha\\,|z_k|^2 + (1-\\alpha)\\,p[k-1],
 86
 87    where ``|z_k|^2`` is taken element-wise.
 88
 89    The normalized transform-domain LMS update used here is
 90
 91    .. math::
 92        w_T[k+1] = w_T[k] + \\mu\\, e^*[k] \\, \\frac{z_k}{\\gamma + p[k]},
 93
 94    with element-wise division.
 95
 96    Mapping back to time domain
 97        If ``T`` is unitary (``T^H T = I``), then the inverse mapping is
 98
 99        .. math::
100            w[k] = T^H w_T[k].
101
102        If ``T`` is not unitary and ``assume_unitary=False``, this implementation
103        uses the pseudo-inverse mapping:
104
105        .. math::
106            w[k] = \\operatorname{pinv}(T)^H w_T[k].
107
108    Implementation details
109        - ``OptimizationResult.coefficients`` stores the **time-domain** coefficient
110          history recorded by the base class (``self.w`` after mapping back).
111        - If ``return_internal_states=True``, the transform-domain coefficient history
112          is returned in ``result.extra["coefficients_transform"]``.
113
114    References
115    ----------
116    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
117       Implementation*, 5th ed., Algorithm 4.4.
118    """
119
120    supports_complex: bool = True
121
122    def __init__(
123        self,
124        filter_order: int,
125        gamma: float,
126        alpha: float,
127        initial_power: float,
128        transform_matrix: np.ndarray,
129        step_size: float = 1e-2,
130        w_init: Optional[ArrayLike] = None,
131        *,
132        assume_unitary: bool = True,
133    ) -> None:
134        super().__init__(filter_order=int(filter_order), w_init=w_init)
135
136        self.gamma = float(gamma)
137        self.alpha = float(alpha)
138        self.step_size = float(step_size)
139
140        self.N = int(self.filter_order + 1)
141
142        T = np.asarray(transform_matrix, dtype=complex)
143        if T.shape != (self.N, self.N):
144            raise ValueError(f"transform_matrix must have shape {(self.N, self.N)}. Got {T.shape}.")
145
146        self.T = T
147        self._assume_unitary = bool(assume_unitary)
148
149        # transform-domain weights (start from time-domain w)
150        self.w_T = self.T @ np.asarray(self.w, dtype=complex)
151
152        # power estimate per transform bin
153        self.power_vector = np.full(self.N, float(initial_power), dtype=float)
154
155        # optional transform-domain history
156        self._w_history_T: List[np.ndarray] = [self.w_T.copy()]
157
158    def _to_time_domain(self, w_T: np.ndarray) -> np.ndarray:
159        """Map transform-domain weights to time-domain weights."""
160        if self._assume_unitary:
161            return self.T.conj().T @ w_T
162        # fallback for non-unitary transforms (more expensive)
163        T_pinv = np.linalg.pinv(self.T)
164        return T_pinv.conj().T @ w_T
165
166    @validate_input
167    def optimize(
168        self,
169        input_signal: np.ndarray,
170        desired_signal: np.ndarray,
171        verbose: bool = False,
172        return_internal_states: bool = False,
173    ) -> OptimizationResult:
174        """
175        Executes the Transform-Domain LMS adaptation loop.
176
177        Parameters
178        ----------
179        input_signal : array_like of complex
180            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
181        desired_signal : array_like of complex
182            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
183        verbose : bool, optional
184            If True, prints the total runtime after completion.
185        return_internal_states : bool, optional
186            If True, includes transform-domain internal states in ``result.extra``:
187            ``"coefficients_transform"``, ``"power_vector_last"``,
188            ``"transform_matrix"``, and ``"assume_unitary"``.
189
190        Returns
191        -------
192        OptimizationResult
193            Result object with fields:
194            - outputs : ndarray of complex, shape ``(N,)``
195                Scalar a priori output sequence, ``y[k] = w_T^H[k] z_k``.
196            - errors : ndarray of complex, shape ``(N,)``
197                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
198            - coefficients : ndarray of complex
199                **Time-domain** coefficient history recorded by the base class.
200            - error_type : str
201                Set to ``"a_priori"``.
202            - extra : dict, optional
203                Present only if ``return_internal_states=True`` with:
204                - ``coefficients_transform`` : ndarray of complex
205                    Transform-domain coefficient history.
206                - ``power_vector_last`` : ndarray of float
207                    Final per-bin power estimate ``p[k]``.
208                - ``transform_matrix`` : ndarray of complex
209                    The transform matrix ``T`` used (shape ``(M+1, M+1)``).
210                - ``assume_unitary`` : bool
211                    Whether the inverse mapping assumed ``T`` is unitary.
212        """
213        t0 = perf_counter()
214
215        x = np.asarray(input_signal, dtype=complex).ravel()
216        d = np.asarray(desired_signal, dtype=complex).ravel()
217
218        n_samples = int(d.size)
219        m = int(self.filter_order)
220
221        outputs = np.zeros(n_samples, dtype=complex)
222        errors = np.zeros(n_samples, dtype=complex)
223
224        x_padded = np.zeros(n_samples + m, dtype=complex)
225        x_padded[m:] = x
226
227        w_hist_T: List[np.ndarray] = [self.w_T.copy()]
228
229        for k in range(n_samples):
230            x_k = x_padded[k : k + m + 1][::-1]
231            z_k = self.T @ x_k
232
233            self.power_vector = (
234                self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector
235            )
236
237            y_k = complex(np.vdot(self.w_T, z_k))
238            outputs[k] = y_k
239
240            e_k = d[k] - y_k
241            errors[k] = e_k
242
243            denom = self.gamma + self.power_vector
244            self.w_T = self.w_T + self.step_size * np.conj(e_k) * (z_k / denom)
245
246            self.w = self._to_time_domain(self.w_T)
247
248            self._record_history()
249            w_hist_T.append(self.w_T.copy())
250
251        runtime_s = float(perf_counter() - t0)
252        if verbose:
253            print(f"[TDomainLMS] Completed in {runtime_s * 1000:.03f} ms")
254
255        extra: Optional[Dict[str, Any]] = None
256        if return_internal_states:
257            extra = {
258                "coefficients_transform": np.asarray(w_hist_T),
259                "power_vector_last": self.power_vector.copy(),
260                "transform_matrix": self.T.copy(),
261                "assume_unitary": self._assume_unitary,
262            }
263
264        return self._pack_results(
265            outputs=outputs,
266            errors=errors,
267            runtime_s=runtime_s,
268            error_type="a_priori",
269            extra=extra,
270        )

Transform-Domain LMS with a user-provided transform matrix.

Generic transform-domain LMS algorithm (Diniz, Alg. 4.4) parameterized by a transform matrix T. At each iteration, the time-domain regressor is mapped to the transform domain, adaptation is performed with per-bin normalization using a smoothed power estimate, and time-domain coefficients are recovered from the transform-domain weights.

Parameters

filter_order : int
    Adaptive FIR filter order M. The number of coefficients is M + 1.
    The transform size must be (M + 1, M + 1).
gamma : float
    Regularization factor gamma used in the per-bin normalization denominator
    to avoid division by zero (or near-zero power).
alpha : float
    Smoothing factor alpha for the transform-bin power estimate, typically close to 1.
initial_power : float
    Initial power estimate used to initialize all transform bins.
transform_matrix : array_like of complex
    Transform matrix T with shape (M + 1, M + 1). Typically unitary (T^H T = I).
step_size : float, optional
    Adaptation step size mu. Default is 1e-2.
w_init : array_like of complex, optional
    Initial **time-domain** coefficient vector w(0) with shape (M + 1,).
    If None, initializes with zeros.
assume_unitary : bool, optional
    If True (default), maps transform-domain weights back to the time domain
    using w = T^H w_T (fast). If False, uses a pseudo-inverse mapping
    w = pinv(T)^H w_T (slower but works for non-unitary T).

Notes

At iteration k, form the time-domain regressor vector (newest sample first):

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$

Define the transform-domain regressor:

$$z_k = T x_k.$$

Adaptation is performed in the transform domain with weights w_T[k]. The a priori output and error are

$$y[k] = w_T^H[k] z_k, \qquad e[k] = d[k] - y[k].$$

A smoothed per-bin power estimate p[k] is updated as

$$p[k] = \alpha\,|z_k|^2 + (1-\alpha)\,p[k-1],$$

where |z_k|^2 is taken element-wise.

The normalized transform-domain LMS update used here is

$$w_T[k+1] = w_T[k] + \mu\, e^*[k] \, \frac{z_k}{\gamma + p[k]},$$

with element-wise division.

Mapping back to time domain: If T is unitary (T^H T = I), then the inverse mapping is

$$w[k] = T^H w_T[k].$$

If ``T`` is not unitary and ``assume_unitary=False``, this implementation
uses the pseudo-inverse mapping:

$$w[k] = \operatorname{pinv}(T)^H w_T[k].$$

Implementation details:
- OptimizationResult.coefficients stores the **time-domain** coefficient history recorded by the base class (self.w after mapping back).
- If return_internal_states=True, the transform-domain coefficient history is returned in result.extra["coefficients_transform"].

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 4.4.

TDomainLMS( filter_order: int, gamma: float, alpha: float, initial_power: float, transform_matrix: numpy.ndarray, step_size: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None, *, assume_unitary: bool = True)
122    def __init__(
123        self,
124        filter_order: int,
125        gamma: float,
126        alpha: float,
127        initial_power: float,
128        transform_matrix: np.ndarray,
129        step_size: float = 1e-2,
130        w_init: Optional[ArrayLike] = None,
131        *,
132        assume_unitary: bool = True,
133    ) -> None:
134        super().__init__(filter_order=int(filter_order), w_init=w_init)
135
136        self.gamma = float(gamma)
137        self.alpha = float(alpha)
138        self.step_size = float(step_size)
139
140        self.N = int(self.filter_order + 1)
141
142        T = np.asarray(transform_matrix, dtype=complex)
143        if T.shape != (self.N, self.N):
144            raise ValueError(f"transform_matrix must have shape {(self.N, self.N)}. Got {T.shape}.")
145
146        self.T = T
147        self._assume_unitary = bool(assume_unitary)
148
149        # transform-domain weights (start from time-domain w)
150        self.w_T = self.T @ np.asarray(self.w, dtype=complex)
151
152        # power estimate per transform bin
153        self.power_vector = np.full(self.N, float(initial_power), dtype=float)
154
155        # optional transform-domain history
156        self._w_history_T: List[np.ndarray] = [self.w_T.copy()]
supports_complex: bool = True
gamma
alpha
step_size
N
T
w_T
power_vector
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
166    @validate_input
167    def optimize(
168        self,
169        input_signal: np.ndarray,
170        desired_signal: np.ndarray,
171        verbose: bool = False,
172        return_internal_states: bool = False,
173    ) -> OptimizationResult:
174        """
175        Executes the Transform-Domain LMS adaptation loop.
176
177        Parameters
178        ----------
179        input_signal : array_like of complex
180            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
181        desired_signal : array_like of complex
182            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
183        verbose : bool, optional
184            If True, prints the total runtime after completion.
185        return_internal_states : bool, optional
186            If True, includes transform-domain internal states in ``result.extra``:
187            ``"coefficients_transform"``, ``"power_vector_last"``,
188            ``"transform_matrix"``, and ``"assume_unitary"``.
189
190        Returns
191        -------
192        OptimizationResult
193            Result object with fields:
194            - outputs : ndarray of complex, shape ``(N,)``
195                Scalar a priori output sequence, ``y[k] = w_T^H[k] z_k``.
196            - errors : ndarray of complex, shape ``(N,)``
197                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
198            - coefficients : ndarray of complex
199                **Time-domain** coefficient history recorded by the base class.
200            - error_type : str
201                Set to ``"a_priori"``.
202            - extra : dict, optional
203                Present only if ``return_internal_states=True`` with:
204                - ``coefficients_transform`` : ndarray of complex
205                    Transform-domain coefficient history.
206                - ``power_vector_last`` : ndarray of float
207                    Final per-bin power estimate ``p[k]``.
208                - ``transform_matrix`` : ndarray of complex
209                    The transform matrix ``T`` used (shape ``(M+1, M+1)``).
210                - ``assume_unitary`` : bool
211                    Whether the inverse mapping assumed ``T`` is unitary.
212        """
213        t0 = perf_counter()
214
215        x = np.asarray(input_signal, dtype=complex).ravel()
216        d = np.asarray(desired_signal, dtype=complex).ravel()
217
218        n_samples = int(d.size)
219        m = int(self.filter_order)
220
221        outputs = np.zeros(n_samples, dtype=complex)
222        errors = np.zeros(n_samples, dtype=complex)
223
224        x_padded = np.zeros(n_samples + m, dtype=complex)
225        x_padded[m:] = x
226
227        w_hist_T: List[np.ndarray] = [self.w_T.copy()]
228
229        for k in range(n_samples):
230            x_k = x_padded[k : k + m + 1][::-1]
231            z_k = self.T @ x_k
232
233            self.power_vector = (
234                self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector
235            )
236
237            y_k = complex(np.vdot(self.w_T, z_k))
238            outputs[k] = y_k
239
240            e_k = d[k] - y_k
241            errors[k] = e_k
242
243            denom = self.gamma + self.power_vector
244            self.w_T = self.w_T + self.step_size * np.conj(e_k) * (z_k / denom)
245
246            self.w = self._to_time_domain(self.w_T)
247
248            self._record_history()
249            w_hist_T.append(self.w_T.copy())
250
251        runtime_s = float(perf_counter() - t0)
252        if verbose:
253            print(f"[TDomainLMS] Completed in {runtime_s * 1000:.03f} ms")
254
255        extra: Optional[Dict[str, Any]] = None
256        if return_internal_states:
257            extra = {
258                "coefficients_transform": np.asarray(w_hist_T),
259                "power_vector_last": self.power_vector.copy(),
260                "transform_matrix": self.T.copy(),
261                "assume_unitary": self._assume_unitary,
262            }
263
264        return self._pack_results(
265            outputs=outputs,
266            errors=errors,
267            runtime_s=runtime_s,
268            error_type="a_priori",
269            extra=extra,
270        )

Executes the Transform-Domain LMS adaptation loop.

Parameters

input_signal : array_like of complex
    Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
    Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
    If True, prints the total runtime after completion.
return_internal_states : bool, optional
    If True, includes transform-domain internal states in result.extra:
    "coefficients_transform", "power_vector_last", "transform_matrix",
    and "assume_unitary".

Returns

OptimizationResult
    Result object with fields:
    - outputs : ndarray of complex, shape (N,)
      Scalar a priori output sequence, y[k] = w_T^H[k] z_k.
    - errors : ndarray of complex, shape (N,)
      Scalar a priori error sequence, e[k] = d[k] - y[k].
    - coefficients : ndarray of complex
      Time-domain coefficient history recorded by the base class.
    - error_type : str
      Set to "a_priori".
    - extra : dict, optional
      Present only if return_internal_states=True, with:
      - coefficients_transform : ndarray of complex
        Transform-domain coefficient history.
      - power_vector_last : ndarray of float
        Final per-bin power estimate p[k].
      - transform_matrix : ndarray of complex
        The transform matrix T used (shape (M+1, M+1)).
      - assume_unitary : bool
        Whether the inverse mapping assumed T is unitary.

class TDomainDCT(pydaptivefiltering.AdaptiveFilter):
 30class TDomainDCT(AdaptiveFilter):
 31    """
 32    Transform-Domain LMS using an orthonormal DCT (complex-valued).
 33
 34    Transform-domain LMS algorithm (Diniz, Alg. 4.4) in which the time-domain
 35    regressor vector is mapped to a decorrelated transform domain using an
 36    orthonormal Discrete Cosine Transform (DCT). Adaptation is performed in the
 37    transform domain with per-bin normalization based on a smoothed power
 38    estimate. The time-domain coefficient vector is recovered from the
 39    transform-domain weights.
 40
 41    Parameters
 42    ----------
 43    filter_order : int
 44        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 45    gamma : float
 46        Regularization factor ``gamma`` used in the per-bin normalization
 47        denominator to avoid division by zero (or near-zero power).
 48    alpha : float
 49        Smoothing factor ``alpha`` for the transform-bin power estimate,
 50        typically close to 1.
 51    initial_power : float
 52        Initial power estimate used to initialize all transform bins.
 53    step_size : float, optional
 54        Adaptation step size ``mu``. Default is 1e-2.
 55    w_init : array_like of complex, optional
 56        Initial time-domain coefficient vector ``w(0)`` with shape ``(M + 1,)``.
 57        If None, initializes with zeros.
 58
 59    Notes
 60    -----
 61    At iteration ``k``, form the time-domain regressor vector (newest sample first):
 62
 63    .. math::
 64        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1}.
 65
 66    Let ``T`` be the orthonormal DCT matrix of size ``(M+1) x (M+1)``
 67    (real-valued, with ``T^T T = I``). The transform-domain regressor is
 68
 69    .. math::
 70        z_k = T x_k.
 71
 72    Adaptation is performed in the transform domain with weights ``w_z[k]``.
 73    The a priori output and error are
 74
 75    .. math::
 76        y[k] = w_z^H[k] z_k, \\qquad e[k] = d[k] - y[k].
 77
 78    A smoothed per-bin power estimate ``p[k]`` is updated as
 79
 80    .. math::
 81        p[k] = \\alpha\\,|z_k|^2 + (1-\\alpha)\\,p[k-1],
 82
 83    where ``|z_k|^2`` is taken element-wise (i.e., ``|z_{k,i}|^2``).
 84
 85    The normalized transform-domain LMS update used here is
 86
 87    .. math::
 88        w_z[k+1] = w_z[k] + \\mu\\, e^*[k] \\, \\frac{z_k}{\\gamma + p[k]},
 89
 90    where the division is element-wise.
 91
 92    The time-domain coefficients are recovered using orthonormality of ``T``:
 93
 94    .. math::
 95        w[k] = T^T w_z[k].
 96
 97    Implementation details
 98        - ``OptimizationResult.coefficients`` stores the **time-domain** coefficient
 99          history recorded by the base class (``self.w`` after the inverse transform).
100        - If ``return_internal_states=True``, the transform-domain coefficient history
101          is returned in ``result.extra["coefficients_dct"]``.
102
103    References
104    ----------
105    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
106       Implementation*, 5th ed., Algorithm 4.4.
107    """
108
109    supports_complex: bool = True
110
111    def __init__(
112        self,
113        filter_order: int,
114        gamma: float,
115        alpha: float,
116        initial_power: float,
117        step_size: float = 1e-2,
118        w_init: Optional[ArrayLike] = None,
119    ) -> None:
120        super().__init__(filter_order=int(filter_order), w_init=w_init)
121
122        self.gamma = float(gamma)
123        self.alpha = float(alpha)
124        self.step_size = float(step_size)
125
126        self.N = int(self.filter_order + 1)
127
128        self.T = dct(np.eye(self.N), norm="ortho", axis=0)
129
130        self.w_dct = self.T @ np.asarray(self.w, dtype=complex)
131
132        self.power_vector = np.full(self.N, float(initial_power), dtype=float)
133
134        self._w_history_dct: List[np.ndarray] = [self.w_dct.copy()]
135
136    @validate_input
137    def optimize(
138        self,
139        input_signal: np.ndarray,
140        desired_signal: np.ndarray,
141        verbose: bool = False,
142        return_internal_states: bool = False,
143    ) -> OptimizationResult:
144        """
145        Executes the Transform-Domain LMS (DCT) adaptation loop.
146
147        Parameters
148        ----------
149        input_signal : array_like of complex
150            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
151        desired_signal : array_like of complex
152            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
153        verbose : bool, optional
154            If True, prints the total runtime after completion.
155        return_internal_states : bool, optional
156            If True, includes transform-domain internal states in ``result.extra``:
157            ``"coefficients_dct"``, ``"power_vector_last"``, and ``"dct_matrix"``.
158
159        Returns
160        -------
161        OptimizationResult
162            Result object with fields:
163            - outputs : ndarray of complex, shape ``(N,)``
164                Scalar a priori output sequence, ``y[k] = w_z^H[k] z_k``.
165            - errors : ndarray of complex, shape ``(N,)``
166                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
167            - coefficients : ndarray of complex
168                **Time-domain** coefficient history recorded by the base class.
169            - error_type : str
170                Set to ``"a_priori"``.
171            - extra : dict, optional
172                Present only if ``return_internal_states=True`` with:
173                - ``coefficients_dct`` : ndarray of complex
174                    Transform-domain coefficient history.
175                - ``power_vector_last`` : ndarray of float
176                    Final per-bin power estimate ``p[k]``.
177                - ``dct_matrix`` : ndarray of float
178                    The DCT matrix ``T`` used (shape ``(M+1, M+1)``).
179        """
180        t0 = perf_counter()
181
182        x = np.asarray(input_signal, dtype=complex).ravel()
183        d = np.asarray(desired_signal, dtype=complex).ravel()
184
185        n_samples = int(d.size)
186        m = int(self.filter_order)
187
188        outputs = np.zeros(n_samples, dtype=complex)
189        errors = np.zeros(n_samples, dtype=complex)
190
191        x_padded = np.zeros(n_samples + m, dtype=complex)
192        x_padded[m:] = x
193
194        w_hist_dct: List[np.ndarray] = [self.w_dct.copy()]
195
196        for k in range(n_samples):
197            x_k = x_padded[k : k + m + 1][::-1]
198            z_k = self.T @ x_k
199
200            self.power_vector = (
201                self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector
202            )
203
204            y_k = complex(np.vdot(self.w_dct, z_k))
205            outputs[k] = y_k
206
207            e_k = d[k] - y_k
208            errors[k] = e_k
209
210            denom = self.gamma + self.power_vector
211            self.w_dct = self.w_dct + self.step_size * np.conj(e_k) * (z_k / denom)
212
213            self.w = self.T.T @ self.w_dct
214
215            self._record_history()
216            w_hist_dct.append(self.w_dct.copy())
217
218        runtime_s = float(perf_counter() - t0)
219        if verbose:
220            print(f"[TDomainDCT] Completed in {runtime_s * 1000:.03f} ms")
221
222        extra: Optional[Dict[str, Any]] = None
223        if return_internal_states:
224            extra = {
225                "coefficients_dct": np.asarray(w_hist_dct),
226                "power_vector_last": self.power_vector.copy(),
227                "dct_matrix": self.T.copy(),
228            }
229
230        return self._pack_results(
231            outputs=outputs,
232            errors=errors,
233            runtime_s=runtime_s,
234            error_type="a_priori",
235            extra=extra,
236        )

Transform-Domain LMS using an orthonormal DCT (complex-valued).

Transform-domain LMS algorithm (Diniz, Alg. 4.4) in which the time-domain regressor vector is mapped to a decorrelated transform domain using an orthonormal Discrete Cosine Transform (DCT). Adaptation is performed in the transform domain with per-bin normalization based on a smoothed power estimate. The time-domain coefficient vector is recovered from the transform-domain weights.

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. gamma : float Regularization factor gamma used in the per-bin normalization denominator to avoid division by zero (or near-zero power). alpha : float Smoothing factor alpha for the transform-bin power estimate, typically close to 1. initial_power : float Initial power estimate used to initialize all transform bins. step_size : float, optional Adaptation step size mu. Default is 1e-2. w_init : array_like of complex, optional Initial time-domain coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

At iteration k, form the time-domain regressor vector (newest sample first):

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$

Let T be the orthonormal DCT matrix of size (M+1) x (M+1) (real-valued, with T^T T = I). The transform-domain regressor is

$$z_k = T x_k.$$

Adaptation is performed in the transform domain with weights w_z[k]. The a priori output and error are

$$y[k] = w_z^H[k] z_k, \qquad e[k] = d[k] - y[k].$$

A smoothed per-bin power estimate p[k] is updated as

$$p[k] = \alpha\,|z_k|^2 + (1-\alpha)\,p[k-1],$$

where |z_k|^2 is taken element-wise (i.e., |z_{k,i}|^2).

The normalized transform-domain LMS update used here is

$$w_z[k+1] = w_z[k] + \mu\, e^*[k] \, \frac{z_k}{\gamma + p[k]},$$

where the division is element-wise.

The time-domain coefficients are recovered using orthonormality of T:

$$w[k] = T^T w_z[k].$$

Implementation details - OptimizationResult.coefficients stores the time-domain coefficient history recorded by the base class (self.w after the inverse transform). - If return_internal_states=True, the transform-domain coefficient history is returned in result.extra["coefficients_dct"].

References

P. S. R. Diniz, Adaptive Filtering: Algorithms and Practical Implementation, 5th ed., Algorithm 4.4.
TDomainDCT( filter_order: int, gamma: float, alpha: float, initial_power: float, step_size: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None)
111    def __init__(
112        self,
113        filter_order: int,
114        gamma: float,
115        alpha: float,
116        initial_power: float,
117        step_size: float = 1e-2,
118        w_init: Optional[ArrayLike] = None,
119    ) -> None:
120        super().__init__(filter_order=int(filter_order), w_init=w_init)
121
122        self.gamma = float(gamma)
123        self.alpha = float(alpha)
124        self.step_size = float(step_size)
125
126        self.N = int(self.filter_order + 1)
127
128        self.T = dct(np.eye(self.N), norm="ortho", axis=0)
129
130        self.w_dct = self.T @ np.asarray(self.w, dtype=complex)
131
132        self.power_vector = np.full(self.N, float(initial_power), dtype=float)
133
134        self._w_history_dct: List[np.ndarray] = [self.w_dct.copy()]
supports_complex: bool = True
gamma
alpha
step_size
N
T
w_dct
power_vector
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
136    @validate_input
137    def optimize(
138        self,
139        input_signal: np.ndarray,
140        desired_signal: np.ndarray,
141        verbose: bool = False,
142        return_internal_states: bool = False,
143    ) -> OptimizationResult:
144        """
145        Executes the Transform-Domain LMS (DCT) adaptation loop.
146
147        Parameters
148        ----------
149        input_signal : array_like of complex
150            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
151        desired_signal : array_like of complex
152            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
153        verbose : bool, optional
154            If True, prints the total runtime after completion.
155        return_internal_states : bool, optional
156            If True, includes transform-domain internal states in ``result.extra``:
157            ``"coefficients_dct"``, ``"power_vector_last"``, and ``"dct_matrix"``.
158
159        Returns
160        -------
161        OptimizationResult
162            Result object with fields:
163            - outputs : ndarray of complex, shape ``(N,)``
164                Scalar a priori output sequence, ``y[k] = w_z^H[k] z_k``.
165            - errors : ndarray of complex, shape ``(N,)``
166                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
167            - coefficients : ndarray of complex
168                **Time-domain** coefficient history recorded by the base class.
169            - error_type : str
170                Set to ``"a_priori"``.
171            - extra : dict, optional
172                Present only if ``return_internal_states=True`` with:
173                - ``coefficients_dct`` : ndarray of complex
174                    Transform-domain coefficient history.
175                - ``power_vector_last`` : ndarray of float
176                    Final per-bin power estimate ``p[k]``.
177                - ``dct_matrix`` : ndarray of float
178                    The DCT matrix ``T`` used (shape ``(M+1, M+1)``).
179        """
180        t0 = perf_counter()
181
182        x = np.asarray(input_signal, dtype=complex).ravel()
183        d = np.asarray(desired_signal, dtype=complex).ravel()
184
185        n_samples = int(d.size)
186        m = int(self.filter_order)
187
188        outputs = np.zeros(n_samples, dtype=complex)
189        errors = np.zeros(n_samples, dtype=complex)
190
191        x_padded = np.zeros(n_samples + m, dtype=complex)
192        x_padded[m:] = x
193
194        w_hist_dct: List[np.ndarray] = [self.w_dct.copy()]
195
196        for k in range(n_samples):
197            x_k = x_padded[k : k + m + 1][::-1]
198            z_k = self.T @ x_k
199
200            self.power_vector = (
201                self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector
202            )
203
204            y_k = complex(np.vdot(self.w_dct, z_k))
205            outputs[k] = y_k
206
207            e_k = d[k] - y_k
208            errors[k] = e_k
209
210            denom = self.gamma + self.power_vector
211            self.w_dct = self.w_dct + self.step_size * np.conj(e_k) * (z_k / denom)
212
213            self.w = self.T.T @ self.w_dct
214
215            self._record_history()
216            w_hist_dct.append(self.w_dct.copy())
217
218        runtime_s = float(perf_counter() - t0)
219        if verbose:
220            print(f"[TDomainDCT] Completed in {runtime_s * 1000:.03f} ms")
221
222        extra: Optional[Dict[str, Any]] = None
223        if return_internal_states:
224            extra = {
225                "coefficients_dct": np.asarray(w_hist_dct),
226                "power_vector_last": self.power_vector.copy(),
227                "dct_matrix": self.T.copy(),
228            }
229
230        return self._pack_results(
231            outputs=outputs,
232            errors=errors,
233            runtime_s=runtime_s,
234            error_type="a_priori",
235            extra=extra,
236        )

Executes the Transform-Domain LMS (DCT) adaptation loop.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes transform-domain internal states in result.extra: "coefficients_dct", "power_vector_last", and "dct_matrix".

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Scalar a priori output sequence, y[k] = w_z^H[k] z_k. - errors : ndarray of complex, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of complex Time-domain coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True with: - coefficients_dct : ndarray of complex Transform-domain coefficient history. - power_vector_last : ndarray of float Final per-bin power estimate p[k]. - dct_matrix : ndarray of float The DCT matrix T used (shape (M+1, M+1)).

class TDomainDFT(pydaptivefiltering.AdaptiveFilter):
 30class TDomainDFT(AdaptiveFilter):
 31    """
 32    Transform-Domain LMS using a unitary DFT (complex-valued).
 33
 34    Transform-domain LMS algorithm (Diniz, Alg. 4.4) in which the time-domain
 35    regressor is mapped to the frequency domain using a *unitary* Discrete
 36    Fourier Transform (DFT). Adaptation is performed in the transform domain
 37    with per-bin normalization based on a smoothed power estimate. The time-domain
 38    coefficient vector is recovered via the inverse unitary DFT.
 39
 40    Parameters
 41    ----------
 42    filter_order : int
 43        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 44        The DFT size is ``N = M + 1``.
 45    gamma : float
 46        Regularization factor ``gamma`` used in the per-bin normalization
 47        denominator to avoid division by zero (or near-zero power).
 48    alpha : float
 49        Smoothing factor ``alpha`` for the transform-bin power estimate,
 50        typically close to 1.
 51    initial_power : float
 52        Initial power estimate used to initialize all transform bins.
 53    step_size : float, optional
 54        Adaptation step size ``mu``. Default is 1e-2.
 55    w_init : array_like of complex, optional
 56        Initial time-domain coefficient vector ``w(0)`` with shape ``(M + 1,)``.
 57        If None, initializes with zeros.
 58
 59    Notes
 60    -----
 61    At iteration ``k``, form the time-domain regressor vector (newest sample first):
 62
 63    .. math::
 64        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{N}.
 65
 66    Define the *unitary* DFT (energy-preserving) transform-domain regressor:
 67
 68    .. math::
 69        z_k = \\frac{\\mathrm{DFT}(x_k)}{\\sqrt{N}}.
 70
 71    Adaptation is performed in the transform domain with weights ``w_z[k]``.
 72    The a priori output and error are
 73
 74    .. math::
 75        y[k] = w_z^H[k] z_k, \\qquad e[k] = d[k] - y[k].
 76
 77    A smoothed per-bin power estimate ``p[k]`` is updated as
 78
 79    .. math::
 80        p[k] = \\alpha\\,|z_k|^2 + (1-\\alpha)\\,p[k-1],
 81
 82    where ``|z_k|^2`` is taken element-wise.
 83
 84    The normalized transform-domain LMS update used here is
 85
 86    .. math::
 87        w_z[k+1] = w_z[k] + \\mu\\, e^*[k] \\, \\frac{z_k}{\\gamma + p[k]},
 88
 89    with element-wise division.
 90
 91    The time-domain coefficients are recovered via the inverse unitary DFT:
 92
 93    .. math::
 94        w[k] = \\mathrm{IDFT}(w_z[k])\\,\\sqrt{N}.
 95
 96    Implementation details
 97        - ``OptimizationResult.coefficients`` stores the **time-domain** coefficient
 98          history recorded by the base class (``self.w`` after inverse transform).
 99        - If ``return_internal_states=True``, the transform-domain coefficient history
100          is returned in ``result.extra["coefficients_dft"]``.
101
102    References
103    ----------
104    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
105       Implementation*, 5th ed., Algorithm 4.4.
106    """
107
108    supports_complex: bool = True
109
110    def __init__(
111        self,
112        filter_order: int,
113        gamma: float,
114        alpha: float,
115        initial_power: float,
116        step_size: float = 1e-2,
117        w_init: Optional[ArrayLike] = None,
118    ) -> None:
119        super().__init__(filter_order=int(filter_order), w_init=w_init)
120
121        self.gamma = float(gamma)
122        self.alpha = float(alpha)
123        self.step_size = float(step_size)
124
125        self.N = int(self.filter_order + 1)
126        self._sqrtN = float(np.sqrt(self.N))
127
128        self.w_dft = fft(np.asarray(self.w, dtype=complex)) / self._sqrtN
129
130        self.power_vector = np.full(self.N, float(initial_power), dtype=float)
131
132        self._w_history_dft: List[np.ndarray] = [self.w_dft.copy()]
133
134    @validate_input
135    def optimize(
136        self,
137        input_signal: np.ndarray,
138        desired_signal: np.ndarray,
139        verbose: bool = False,
140        return_internal_states: bool = False,
141    ) -> OptimizationResult:
142        """
143        Executes the Transform-Domain LMS (DFT) adaptation loop.
144
145        Parameters
146        ----------
147        input_signal : array_like of complex
148            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
149        desired_signal : array_like of complex
150            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
151        verbose : bool, optional
152            If True, prints the total runtime after completion.
153        return_internal_states : bool, optional
154            If True, includes transform-domain internal states in ``result.extra``:
155            ``"coefficients_dft"``, ``"power_vector_last"``, and ``"sqrtN"``.
156
157        Returns
158        -------
159        OptimizationResult
160            Result object with fields:
161            - outputs : ndarray of complex, shape ``(N,)``
162                Scalar a priori output sequence, ``y[k] = w_z^H[k] z_k``.
163            - errors : ndarray of complex, shape ``(N,)``
164                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
165            - coefficients : ndarray of complex
166                **Time-domain** coefficient history recorded by the base class.
167            - error_type : str
168                Set to ``"a_priori"``.
169            - extra : dict, optional
170                Present only if ``return_internal_states=True`` with:
171                - ``coefficients_dft`` : ndarray of complex
172                    Transform-domain coefficient history.
173                - ``power_vector_last`` : ndarray of float
174                    Final per-bin power estimate ``p[k]``.
175                - ``sqrtN`` : float
176                    The unitary normalization factor ``\\sqrt{N}``.
177        """
178        t0 = perf_counter()
179
180        x = np.asarray(input_signal, dtype=complex).ravel()
181        d = np.asarray(desired_signal, dtype=complex).ravel()
182
183        n_samples = int(d.size)
184        m = int(self.filter_order)
185
186        outputs = np.zeros(n_samples, dtype=complex)
187        errors = np.zeros(n_samples, dtype=complex)
188
189        x_padded = np.zeros(n_samples + m, dtype=complex)
190        x_padded[m:] = x
191
192        w_hist_dft: List[np.ndarray] = [self.w_dft.copy()]
193
194        for k in range(n_samples):
195            x_k = x_padded[k : k + m + 1][::-1]
196            z_k = fft(x_k) / self._sqrtN
197
198            self.power_vector = (
199                self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector
200            )
201
202            y_k = complex(np.vdot(self.w_dft, z_k))
203            outputs[k] = y_k
204
205            e_k = d[k] - y_k
206            errors[k] = e_k
207
208            denom = self.gamma + self.power_vector
209            self.w_dft = self.w_dft + self.step_size * np.conj(e_k) * (z_k / denom)
210
211            self.w = ifft(self.w_dft) * self._sqrtN
212
213            self._record_history()
214            w_hist_dft.append(self.w_dft.copy())
215
216        runtime_s = float(perf_counter() - t0)
217        if verbose:
218            print(f"[TDomainDFT] Completed in {runtime_s * 1000:.03f} ms")
219
220        extra: Optional[Dict[str, Any]] = None
221        if return_internal_states:
222            extra = {
223                "coefficients_dft": np.asarray(w_hist_dft),
224                "power_vector_last": self.power_vector.copy(),
225                "sqrtN": self._sqrtN,
226            }
227
228        return self._pack_results(
229            outputs=outputs,
230            errors=errors,
231            runtime_s=runtime_s,
232            error_type="a_priori",
233            extra=extra,
234        )

Transform-Domain LMS using a unitary DFT (complex-valued).

Transform-domain LMS algorithm (Diniz, Alg. 4.4) in which the time-domain regressor is mapped to the frequency domain using a unitary Discrete Fourier Transform (DFT). Adaptation is performed in the transform domain with per-bin normalization based on a smoothed power estimate. The time-domain coefficient vector is recovered via the inverse unitary DFT.

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. The DFT size is N = M + 1. gamma : float Regularization factor gamma used in the per-bin normalization denominator to avoid division by zero (or near-zero power). alpha : float Smoothing factor alpha for the transform-bin power estimate, typically close to 1. initial_power : float Initial power estimate used to initialize all transform bins. step_size : float, optional Adaptation step size mu. Default is 1e-2. w_init : array_like of complex, optional Initial time-domain coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

At iteration k, form the time-domain regressor vector (newest sample first):

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{N}.$$

Define the unitary DFT (energy-preserving) transform-domain regressor:

$$z_k = \frac{\mathrm{DFT}(x_k)}{\sqrt{N}}.$$

Adaptation is performed in the transform domain with weights w_z[k]. The a priori output and error are

$$y[k] = w_z^H[k] z_k, \qquad e[k] = d[k] - y[k].$$

A smoothed per-bin power estimate p[k] is updated as

$$p[k] = \alpha\,|z_k|^2 + (1-\alpha)\,p[k-1],$$

where |z_k|^2 is taken element-wise.

The normalized transform-domain LMS update used here is

$$w_z[k+1] = w_z[k] + \mu\, e^*[k] \, \frac{z_k}{\gamma + p[k]},$$

with element-wise division.

The time-domain coefficients are recovered via the inverse unitary DFT:

$$w[k] = \mathrm{IDFT}(w_z[k])\,\sqrt{N}.$$

Implementation details - OptimizationResult.coefficients stores the time-domain coefficient history recorded by the base class (self.w after inverse transform). - If return_internal_states=True, the transform-domain coefficient history is returned in result.extra["coefficients_dft"].

References

P. S. R. Diniz, Adaptive Filtering: Algorithms and Practical Implementation, 5th ed., Algorithm 4.4.
TDomainDFT( filter_order: int, gamma: float, alpha: float, initial_power: float, step_size: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None)
110    def __init__(
111        self,
112        filter_order: int,
113        gamma: float,
114        alpha: float,
115        initial_power: float,
116        step_size: float = 1e-2,
117        w_init: Optional[ArrayLike] = None,
118    ) -> None:
119        super().__init__(filter_order=int(filter_order), w_init=w_init)
120
121        self.gamma = float(gamma)
122        self.alpha = float(alpha)
123        self.step_size = float(step_size)
124
125        self.N = int(self.filter_order + 1)
126        self._sqrtN = float(np.sqrt(self.N))
127
128        self.w_dft = fft(np.asarray(self.w, dtype=complex)) / self._sqrtN
129
130        self.power_vector = np.full(self.N, float(initial_power), dtype=float)
131
132        self._w_history_dft: List[np.ndarray] = [self.w_dft.copy()]
supports_complex: bool = True
gamma
alpha
step_size
N
w_dft
power_vector
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
134    @validate_input
135    def optimize(
136        self,
137        input_signal: np.ndarray,
138        desired_signal: np.ndarray,
139        verbose: bool = False,
140        return_internal_states: bool = False,
141    ) -> OptimizationResult:
142        """
143        Executes the Transform-Domain LMS (DFT) adaptation loop.
144
145        Parameters
146        ----------
147        input_signal : array_like of complex
148            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
149        desired_signal : array_like of complex
150            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
151        verbose : bool, optional
152            If True, prints the total runtime after completion.
153        return_internal_states : bool, optional
154            If True, includes transform-domain internal states in ``result.extra``:
155            ``"coefficients_dft"``, ``"power_vector_last"``, and ``"sqrtN"``.
156
157        Returns
158        -------
159        OptimizationResult
160            Result object with fields:
161            - outputs : ndarray of complex, shape ``(N,)``
162                Scalar a priori output sequence, ``y[k] = w_z^H[k] z_k``.
163            - errors : ndarray of complex, shape ``(N,)``
164                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
165            - coefficients : ndarray of complex
166                **Time-domain** coefficient history recorded by the base class.
167            - error_type : str
168                Set to ``"a_priori"``.
169            - extra : dict, optional
170                Present only if ``return_internal_states=True`` with:
171                - ``coefficients_dft`` : ndarray of complex
172                    Transform-domain coefficient history.
173                - ``power_vector_last`` : ndarray of float
174                    Final per-bin power estimate ``p[k]``.
175                - ``sqrtN`` : float
176                    The unitary normalization factor ``\\sqrt{N}``.
177        """
178        t0 = perf_counter()
179
180        x = np.asarray(input_signal, dtype=complex).ravel()
181        d = np.asarray(desired_signal, dtype=complex).ravel()
182
183        n_samples = int(d.size)
184        m = int(self.filter_order)
185
186        outputs = np.zeros(n_samples, dtype=complex)
187        errors = np.zeros(n_samples, dtype=complex)
188
189        x_padded = np.zeros(n_samples + m, dtype=complex)
190        x_padded[m:] = x
191
192        w_hist_dft: List[np.ndarray] = [self.w_dft.copy()]
193
194        for k in range(n_samples):
195            x_k = x_padded[k : k + m + 1][::-1]
196            z_k = fft(x_k) / self._sqrtN
197
198            self.power_vector = (
199                self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector
200            )
201
202            y_k = complex(np.vdot(self.w_dft, z_k))
203            outputs[k] = y_k
204
205            e_k = d[k] - y_k
206            errors[k] = e_k
207
208            denom = self.gamma + self.power_vector
209            self.w_dft = self.w_dft + self.step_size * np.conj(e_k) * (z_k / denom)
210
211            self.w = ifft(self.w_dft) * self._sqrtN
212
213            self._record_history()
214            w_hist_dft.append(self.w_dft.copy())
215
216        runtime_s = float(perf_counter() - t0)
217        if verbose:
218            print(f"[TDomainDFT] Completed in {runtime_s * 1000:.03f} ms")
219
220        extra: Optional[Dict[str, Any]] = None
221        if return_internal_states:
222            extra = {
223                "coefficients_dft": np.asarray(w_hist_dft),
224                "power_vector_last": self.power_vector.copy(),
225                "sqrtN": self._sqrtN,
226            }
227
228        return self._pack_results(
229            outputs=outputs,
230            errors=errors,
231            runtime_s=runtime_s,
232            error_type="a_priori",
233            extra=extra,
234        )

Executes the Transform-Domain LMS (DFT) adaptation loop.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes transform-domain internal states in result.extra: "coefficients_dft", "power_vector_last", and "sqrtN".

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Scalar a priori output sequence, y[k] = w_z^H[k] z_k. - errors : ndarray of complex, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of complex Time-domain coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True with: - coefficients_dft : ndarray of complex Transform-domain coefficient history. - power_vector_last : ndarray of float Final per-bin power estimate p[k]. - sqrtN : float The unitary normalization factor \sqrt{N}.

class RLS(pydaptivefiltering.AdaptiveFilter):
 28class RLS(AdaptiveFilter):
 29    """
 30    Recursive Least Squares (RLS) adaptive filter (complex-valued).
 31
 32    Exponentially-weighted least-squares adaptive FIR filter following
 33    Diniz (Alg. 5.3). The algorithm updates the coefficient vector using a
 34    Kalman-gain-like direction and updates an inverse correlation matrix via
 35    the matrix inversion lemma.
 36
 37    Parameters
 38    ----------
 39    filter_order : int
 40        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 41    delta : float
 42        Positive initialization factor for the inverse correlation matrix:
 43        ``S_d(0) = (1/delta) I``.
 44    lamb : float
 45        Forgetting factor ``lambda`` with ``0 < lambda <= 1``.
 46    w_init : array_like of complex, optional
 47        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 48        initializes with zeros.
 49    safe_eps : float, optional
 50        Small positive constant used to guard denominators. Default is 1e-12.
 51
 52    Notes
 53    -----
 54    At iteration ``k``, form the regressor vector (tapped delay line):
 55
 56    - ``x_k = [x[k], x[k-1], ..., x[k-M]]^T  ∈ 𝕮^{M+1}``
 57
 58    The a priori output and error are:
 59
 60    .. math::
 61        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].
 62
 63    Let ``S_d[k-1] ∈ 𝕮^{(M+1)\\times(M+1)}`` denote the inverse correlation
 64    estimate. Define the intermediate vector:
 65
 66    .. math::
 67        \\psi[k] = S_d[k-1] x_k.
 68
 69    The gain denominator and gain vector are:
 70
 71    .. math::
 72        \\Delta[k] = \\lambda + x_k^H \\psi[k]
 73                   = \\lambda + x_k^H S_d[k-1] x_k,
 74
 75    .. math::
 76        g[k] = \\frac{\\psi[k]}{\\Delta[k]}.
 77
 78    The coefficient update is:
 79
 80    .. math::
 81        w[k+1] = w[k] + e^*[k] \\, g[k],
 82
 83    and the inverse correlation update is:
 84
 85    .. math::
 86        S_d[k] = \\frac{1}{\\lambda}\\Bigl(S_d[k-1] - g[k] \\psi^H[k]\\Bigr).
 87
 88    A posteriori quantities
 89        If ``return_internal_states=True``, this implementation also computes the
 90        a posteriori output/error using the updated weights:
 91
 92        .. math::
 93            y^{post}[k] = w^H[k+1] x_k, \\qquad e^{post}[k] = d[k] - y^{post}[k].
 94
 95    References
 96    ----------
 97    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 98       Implementation*, 5th ed., Algorithm 5.3.
 99    """
100
101    supports_complex: bool = True
102
103    lamb: float
104    delta: float
105    S_d: np.ndarray
106
107    def __init__(
108        self,
109        filter_order: int,
110        delta: float,
111        lamb: float,
112        w_init: Optional[ArrayLike] = None,
113        *,
114        safe_eps: float = 1e-12,
115    ) -> None:
116        super().__init__(filter_order=int(filter_order), w_init=w_init)
117
118        self.lamb = float(lamb)
119        if not (0.0 < self.lamb <= 1.0):
120            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got lamb={self.lamb}.")
121
122        self.delta = float(delta)
123        if self.delta <= 0.0:
124            raise ValueError(f"delta must be positive. Got delta={self.delta}.")
125
126        self._safe_eps = float(safe_eps)
127
128        n_taps = int(self.filter_order) + 1
129        self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)
130
131    @validate_input
132    def optimize(
133        self,
134        input_signal: np.ndarray,
135        desired_signal: np.ndarray,
136        verbose: bool = False,
137        return_internal_states: bool = False,
138    ) -> OptimizationResult:
139        """
140        Executes the RLS adaptation loop over paired input/desired sequences.
141
142        Parameters
143        ----------
144        input_signal : array_like of complex
145            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
146        desired_signal : array_like of complex
147            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
148        verbose : bool, optional
149            If True, prints the total runtime after completion.
150        return_internal_states : bool, optional
151            If True, includes a posteriori sequences and final internal states in
152            ``result.extra`` (see below).
153
154        Returns
155        -------
156        OptimizationResult
157            Result object with fields:
158            - outputs : ndarray of complex, shape ``(N,)``
159                A priori output sequence, ``y[k] = w^H[k] x_k``.
160            - errors : ndarray of complex, shape ``(N,)``
161                A priori error sequence, ``e[k] = d[k] - y[k]``.
162            - coefficients : ndarray of complex
163                Coefficient history recorded by the base class.
164            - error_type : str
165                Set to ``"a_priori"``.
166            - extra : dict, optional
167                Present only if ``return_internal_states=True`` with:
168                - ``outputs_posteriori`` : ndarray of complex
169                    A posteriori output sequence, ``y^{post}[k] = w^H[k+1] x_k``.
170                - ``errors_posteriori`` : ndarray of complex
171                    A posteriori error sequence, ``e^{post}[k] = d[k] - y^{post}[k]``.
172                - ``S_d_last`` : ndarray of complex
173                    Final inverse correlation matrix ``S_d``.
174                - ``gain_last`` : ndarray of complex
175                    Last gain vector ``g[k]``.
176        """
177        tic: float = time()
178
179        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
180        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
181
182        n_samples: int = int(d.size)
183
184        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
185        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
186
187        y_post: Optional[np.ndarray] = None
188        e_post: Optional[np.ndarray] = None
189        if return_internal_states:
190            y_post = np.zeros(n_samples, dtype=complex)
191            e_post = np.zeros(n_samples, dtype=complex)
192
193        last_gain: Optional[np.ndarray] = None
194
195        for k in range(n_samples):
196            self.regressor = np.roll(self.regressor, 1)
197            self.regressor[0] = x[k]
198
199            y_k: complex = complex(np.vdot(self.w, self.regressor))
200            e_k: complex = d[k] - y_k
201
202            outputs[k] = y_k
203            errors[k] = e_k
204
205            Sx: np.ndarray = self.S_d @ self.regressor
206            den: complex = self.lamb + complex(np.vdot(self.regressor, Sx))
207            if abs(den) < self._safe_eps:
208                den = den + (self._safe_eps + 0.0j)
209
210            g: np.ndarray = Sx / den
211            last_gain = g
212
213            self.w = self.w + np.conj(e_k) * g
214
215            self.S_d = (self.S_d - np.outer(g, np.conj(Sx))) / self.lamb
216
217            if return_internal_states:
218                yk_post = complex(np.vdot(self.w, self.regressor))
219                y_post[k] = yk_post
220                e_post[k] = d[k] - yk_post
221
222            self._record_history()
223
224        runtime_s: float = float(time() - tic)
225        if verbose:
226            print(f"[RLS] Completed in {runtime_s * 1000:.03f} ms")
227
228        extra: Optional[Dict[str, Any]] = None
229        if return_internal_states:
230            extra = {
231                "outputs_posteriori": y_post,
232                "errors_posteriori": e_post,
233                "S_d_last": self.S_d.copy(),
234                "gain_last": None if last_gain is None else last_gain.copy(),
235            }
236
237        return self._pack_results(
238            outputs=outputs,
239            errors=errors,
240            runtime_s=runtime_s,
241            error_type="a_priori",
242            extra=extra,
243        )

Recursive Least Squares (RLS) adaptive filter (complex-valued).

Exponentially-weighted least-squares adaptive FIR filter following Diniz (Alg. 5.3). The algorithm updates the coefficient vector using a Kalman-gain-like direction and updates an inverse correlation matrix via the matrix inversion lemma.

Parameters

filter_order : int
    Adaptive FIR filter order M. The number of coefficients is M + 1.
delta : float
    Positive initialization factor for the inverse correlation matrix:
    S_d(0) = (1/delta) I.
lamb : float
    Forgetting factor lambda with 0 < lambda <= 1.
w_init : array_like of complex, optional
    Initial coefficient vector w(0) with shape (M + 1,). If None,
    initializes with zeros.
safe_eps : float, optional
    Small positive constant used to guard denominators. Default is 1e-12.

Notes

At iteration k, form the regressor vector (tapped delay line):

  • x_k = [x[k], x[k-1], ..., x[k-M]]^T ∈ ℂ^{M+1}

The a priori output and error are:

$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$

Let S_d[k-1] ∈ ℂ^{(M+1)×(M+1)} denote the inverse correlation estimate. Define the intermediate vector:

$$\psi[k] = S_d[k-1] x_k.$$

The gain denominator and gain vector are:

$$\Delta[k] = \lambda + x_k^H \psi[k] = \lambda + x_k^H S_d[k-1] x_k,$$

$$g[k] = \frac{\psi[k]}{\Delta[k]}.$$

The coefficient update is:

$$w[k+1] = w[k] + e^*[k] \, g[k],$$

and the inverse correlation update is:

$$S_d[k] = \frac{1}{\lambda}\Bigl(S_d[k-1] - g[k] \psi^H[k]\Bigr).$$

A posteriori quantities: if return_internal_states=True, this implementation also computes the a posteriori output/error using the updated weights:

$$y^{post}[k] = w^H[k+1] x_k, \qquad e^{post}[k] = d[k] - y^{post}[k].$$

References

  1. P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
     Implementation*, 5th ed., Algorithm 5.3.

RLS( filter_order: int, delta: float, lamb: float, w_init: Union[numpy.ndarray, list, NoneType] = None, *, safe_eps: float = 1e-12)
107    def __init__(
108        self,
109        filter_order: int,
110        delta: float,
111        lamb: float,
112        w_init: Optional[ArrayLike] = None,
113        *,
114        safe_eps: float = 1e-12,
115    ) -> None:
116        super().__init__(filter_order=int(filter_order), w_init=w_init)
117
118        self.lamb = float(lamb)
119        if not (0.0 < self.lamb <= 1.0):
120            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got lamb={self.lamb}.")
121
122        self.delta = float(delta)
123        if self.delta <= 0.0:
124            raise ValueError(f"delta must be positive. Got delta={self.delta}.")
125
126        self._safe_eps = float(safe_eps)
127
128        n_taps = int(self.filter_order) + 1
129        self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)
supports_complex: bool = True
lamb: float
delta: float
S_d: numpy.ndarray
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
131    @validate_input
132    def optimize(
133        self,
134        input_signal: np.ndarray,
135        desired_signal: np.ndarray,
136        verbose: bool = False,
137        return_internal_states: bool = False,
138    ) -> OptimizationResult:
139        """
140        Executes the RLS adaptation loop over paired input/desired sequences.
141
142        Parameters
143        ----------
144        input_signal : array_like of complex
145            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
146        desired_signal : array_like of complex
147            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
148        verbose : bool, optional
149            If True, prints the total runtime after completion.
150        return_internal_states : bool, optional
151            If True, includes a posteriori sequences and final internal states in
152            ``result.extra`` (see below).
153
154        Returns
155        -------
156        OptimizationResult
157            Result object with fields:
158            - outputs : ndarray of complex, shape ``(N,)``
159                A priori output sequence, ``y[k] = w^H[k] x_k``.
160            - errors : ndarray of complex, shape ``(N,)``
161                A priori error sequence, ``e[k] = d[k] - y[k]``.
162            - coefficients : ndarray of complex
163                Coefficient history recorded by the base class.
164            - error_type : str
165                Set to ``"a_priori"``.
166            - extra : dict, optional
167                Present only if ``return_internal_states=True`` with:
168                - ``outputs_posteriori`` : ndarray of complex
169                    A posteriori output sequence, ``y^{post}[k] = w^H[k+1] x_k``.
170                - ``errors_posteriori`` : ndarray of complex
171                    A posteriori error sequence, ``e^{post}[k] = d[k] - y^{post}[k]``.
172                - ``S_d_last`` : ndarray of complex
173                    Final inverse correlation matrix ``S_d``.
174                - ``gain_last`` : ndarray of complex
175                    Last gain vector ``g[k]``.
176        """
177        tic: float = time()
178
179        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
180        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
181
182        n_samples: int = int(d.size)
183
184        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
185        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
186
187        y_post: Optional[np.ndarray] = None
188        e_post: Optional[np.ndarray] = None
189        if return_internal_states:
190            y_post = np.zeros(n_samples, dtype=complex)
191            e_post = np.zeros(n_samples, dtype=complex)
192
193        last_gain: Optional[np.ndarray] = None
194
195        for k in range(n_samples):
196            self.regressor = np.roll(self.regressor, 1)
197            self.regressor[0] = x[k]
198
199            y_k: complex = complex(np.vdot(self.w, self.regressor))
200            e_k: complex = d[k] - y_k
201
202            outputs[k] = y_k
203            errors[k] = e_k
204
205            Sx: np.ndarray = self.S_d @ self.regressor
206            den: complex = self.lamb + complex(np.vdot(self.regressor, Sx))
207            if abs(den) < self._safe_eps:
208                den = den + (self._safe_eps + 0.0j)
209
210            g: np.ndarray = Sx / den
211            last_gain = g
212
213            self.w = self.w + np.conj(e_k) * g
214
215            self.S_d = (self.S_d - np.outer(g, np.conj(Sx))) / self.lamb
216
217            if return_internal_states:
218                yk_post = complex(np.vdot(self.w, self.regressor))
219                y_post[k] = yk_post
220                e_post[k] = d[k] - yk_post
221
222            self._record_history()
223
224        runtime_s: float = float(time() - tic)
225        if verbose:
226            print(f"[RLS] Completed in {runtime_s * 1000:.03f} ms")
227
228        extra: Optional[Dict[str, Any]] = None
229        if return_internal_states:
230            extra = {
231                "outputs_posteriori": y_post,
232                "errors_posteriori": e_post,
233                "S_d_last": self.S_d.copy(),
234                "gain_last": None if last_gain is None else last_gain.copy(),
235            }
236
237        return self._pack_results(
238            outputs=outputs,
239            errors=errors,
240            runtime_s=runtime_s,
241            error_type="a_priori",
242            extra=extra,
243        )

Executes the RLS adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of complex
    Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
    Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
    If True, prints the total runtime after completion.
return_internal_states : bool, optional
    If True, includes a posteriori sequences and final internal states in
    result.extra (see below).

Returns

OptimizationResult
    Result object with fields:

    - outputs : ndarray of complex, shape (N,)
        A priori output sequence, y[k] = w^H[k] x_k.
    - errors : ndarray of complex, shape (N,)
        A priori error sequence, e[k] = d[k] - y[k].
    - coefficients : ndarray of complex
        Coefficient history recorded by the base class.
    - error_type : str
        Set to "a_priori".
    - extra : dict, optional
        Present only if return_internal_states=True, with:

        - outputs_posteriori : ndarray of complex
            A posteriori output sequence, y^{post}[k] = w^H[k+1] x_k.
        - errors_posteriori : ndarray of complex
            A posteriori error sequence, e^{post}[k] = d[k] - y^{post}[k].
        - S_d_last : ndarray of complex
            Final inverse correlation matrix S_d.
        - gain_last : ndarray of complex
            Last gain vector g[k].

class RLSAlt(pydaptivefiltering.AdaptiveFilter):
 30class RLSAlt(AdaptiveFilter):
 31    """
 32    Alternative RLS (RLS-Alt) adaptive filter (complex-valued).
 33
 34    Alternative RLS algorithm based on Diniz (Alg. 5.4), designed to reduce
 35    the computational burden of the standard RLS recursion by introducing an
 36    auxiliary vector ``psi[k]``. The method maintains an estimate of the inverse
 37    input correlation matrix and updates the coefficients using a Kalman-gain-like
 38    vector.
 39
 40    Parameters
 41    ----------
 42    filter_order : int
 43        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 44    delta : float
 45        Positive initialization factor for the inverse correlation matrix:
 46        ``S_d(0) = (1/delta) I``.
 47    lamb : float
 48        Forgetting factor ``lambda`` with ``0 < lambda <= 1``.
 49    w_init : array_like of complex, optional
 50        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 51        initializes with zeros.
 52    safe_eps : float, optional
 53        Small positive constant used to guard denominators. Default is 1e-12.
 54
 55    Notes
 56    -----
 57    At iteration ``k``, form the regressor vector (tapped delay line):
 58
 59    - ``x_k = [x[k], x[k-1], ..., x[k-M]]^T  ∈ 𝕮^{M+1}``
 60
 61    The a priori output and error are:
 62
 63    .. math::
 64        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].
 65
 66    The key auxiliary vector is:
 67
 68    .. math::
 69        \\psi[k] = S_d[k-1] x_k,
 70
 71    where ``S_d[k-1]`` is the inverse correlation estimate.
 72
 73    Define the gain denominator:
 74
 75    .. math::
 76        \\Delta[k] = \\lambda + x_k^H \\psi[k]
 77                   = \\lambda + x_k^H S_d[k-1] x_k,
 78
 79    and the gain vector:
 80
 81    .. math::
 82        g[k] = \\frac{\\psi[k]}{\\Delta[k]}.
 83
 84    The coefficient update is:
 85
 86    .. math::
 87        w[k+1] = w[k] + e^*[k] \\, g[k],
 88
 89    and the inverse correlation update is:
 90
 91    .. math::
 92        S_d[k] = \\frac{1}{\\lambda}\\Bigl(S_d[k-1] - g[k] \\psi^H[k]\\Bigr).
 93
 94    A posteriori quantities
 95        If requested, this implementation also computes the *a posteriori*
 96        output/error using the updated weights:
 97
 98        .. math::
 99            y^{post}[k] = w^H[k+1] x_k, \\qquad e^{post}[k] = d[k] - y^{post}[k].
100
101    References
102    ----------
103    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
104       Implementation*, 5th ed., Algorithm 5.4.
105    """
106
107    supports_complex: bool = True
108
109    lamb: float
110    delta: float
111    S_d: np.ndarray
112
113    def __init__(
114        self,
115        filter_order: int,
116        delta: float,
117        lamb: float,
118        w_init: Optional[ArrayLike] = None,
119        *,
120        safe_eps: float = 1e-12,
121    ) -> None:
122        super().__init__(filter_order=int(filter_order), w_init=w_init)
123
124        self.lamb = float(lamb)
125        if not (0.0 < self.lamb <= 1.0):
126            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got lamb={self.lamb}.")
127
128        self.delta = float(delta)
129        if self.delta <= 0.0:
130            raise ValueError(f"delta must be positive. Got delta={self.delta}.")
131
132        self._safe_eps = float(safe_eps)
133
134        n_taps = int(self.filter_order) + 1
135        self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)
136
137    @validate_input
138    def optimize(
139        self,
140        input_signal: np.ndarray,
141        desired_signal: np.ndarray,
142        verbose: bool = False,
143        return_internal_states: bool = False,
144    ) -> OptimizationResult:
145        """
146        Executes the RLS-Alt adaptation loop over paired input/desired sequences.
147
148        Parameters
149        ----------
150        input_signal : array_like of complex
151            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
152        desired_signal : array_like of complex
153            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
154        verbose : bool, optional
155            If True, prints the total runtime after completion.
156        return_internal_states : bool, optional
157            If True, includes a posteriori sequences and the last internal states
158            in ``result.extra`` (see below).
159
160        Returns
161        -------
162        OptimizationResult
163            Result object with fields:
164            - outputs : ndarray of complex, shape ``(N,)``
165                A priori output sequence, ``y[k] = w^H[k] x_k``.
166            - errors : ndarray of complex, shape ``(N,)``
167                A priori error sequence, ``e[k] = d[k] - y[k]``.
168            - coefficients : ndarray of complex
169                Coefficient history recorded by the base class.
170            - error_type : str
171                Set to ``"a_priori"``.
172            - extra : dict, optional
173                Present only if ``return_internal_states=True`` with:
174                - ``outputs_posteriori`` : ndarray of complex
175                    A posteriori output sequence, ``y^{post}[k] = w^H[k+1] x_k``.
176                - ``errors_posteriori`` : ndarray of complex
177                    A posteriori error sequence, ``e^{post}[k] = d[k] - y^{post}[k]``.
178                - ``S_d_last`` : ndarray of complex
179                    Final inverse correlation matrix ``S_d``.
180                - ``gain_last`` : ndarray of complex
181                    Last gain vector ``g[k]``.
182        """
183        tic: float = time()
184
185        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
186        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
187
188        n_samples: int = int(d.size)
189
190        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
191        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
192
193        y_post: Optional[np.ndarray] = None
194        e_post: Optional[np.ndarray] = None
195        if return_internal_states:
196            y_post = np.zeros(n_samples, dtype=complex)
197            e_post = np.zeros(n_samples, dtype=complex)
198
199        last_gain: Optional[np.ndarray] = None
200
201        for k in range(n_samples):
202            self.regressor = np.roll(self.regressor, 1)
203            self.regressor[0] = x[k]
204
205            y_k = complex(np.vdot(self.w, self.regressor))
206            e_k = d[k] - y_k
207
208            outputs[k] = y_k
209            errors[k] = e_k
210
211            psi: np.ndarray = self.S_d @ self.regressor
212
213            den: complex = self.lamb + complex(np.vdot(self.regressor, psi))
214            if abs(den) < self._safe_eps:
215                den = den + (self._safe_eps + 0.0j)
216
217            g: np.ndarray = psi / den
218            last_gain = g
219
220            self.w = self.w + np.conj(e_k) * g
221
222            self.S_d = (self.S_d - np.outer(g, np.conj(psi))) / self.lamb
223
224            if return_internal_states:
225                yk_post = complex(np.vdot(self.w, self.regressor))
226                y_post[k] = yk_post
227                e_post[k] = d[k] - yk_post
228
229            self._record_history()
230
231        runtime_s: float = float(time() - tic)
232        if verbose:
233            print(f"[RLSAlt] Completed in {runtime_s * 1000:.03f} ms")
234
235        extra: Optional[Dict[str, Any]] = None
236        if return_internal_states:
237            extra = {
238                "outputs_posteriori": y_post,
239                "errors_posteriori": e_post,
240                "S_d_last": self.S_d.copy(),
241                "gain_last": None if last_gain is None else last_gain.copy(),
242            }
243
244        return self._pack_results(
245            outputs=outputs,
246            errors=errors,
247            runtime_s=runtime_s,
248            error_type="a_priori",
249            extra=extra,
250        )

Alternative RLS (RLS-Alt) adaptive filter (complex-valued).

Alternative RLS algorithm based on Diniz (Alg. 5.4), designed to reduce the computational burden of the standard RLS recursion by introducing an auxiliary vector psi[k]. The method maintains an estimate of the inverse input correlation matrix and updates the coefficients using a Kalman-gain-like vector.

Parameters

filter_order : int
    Adaptive FIR filter order M. The number of coefficients is M + 1.
delta : float
    Positive initialization factor for the inverse correlation matrix:
    S_d(0) = (1/delta) I.
lamb : float
    Forgetting factor lambda with 0 < lambda <= 1.
w_init : array_like of complex, optional
    Initial coefficient vector w(0) with shape (M + 1,). If None,
    initializes with zeros.
safe_eps : float, optional
    Small positive constant used to guard denominators. Default is 1e-12.

Notes

At iteration k, form the regressor vector (tapped delay line):

  • x_k = [x[k], x[k-1], ..., x[k-M]]^T ∈ ℂ^{M+1}

The a priori output and error are:

$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$

The key auxiliary vector is:

$$\psi[k] = S_d[k-1] x_k,$$

where S_d[k-1] is the inverse correlation estimate.

Define the gain denominator:

$$\Delta[k] = \lambda + x_k^H \psi[k] = \lambda + x_k^H S_d[k-1] x_k,$$

and the gain vector:

$$g[k] = \frac{\psi[k]}{\Delta[k]}.$$

The coefficient update is:

$$w[k+1] = w[k] + e^*[k] \, g[k],$$

and the inverse correlation update is:

$$S_d[k] = \frac{1}{\lambda}\Bigl(S_d[k-1] - g[k] \psi^H[k]\Bigr).$$

A posteriori quantities: if requested, this implementation also computes the a posteriori output/error using the updated weights:

$$y^{post}[k] = w^H[k+1] x_k, \qquad e^{post}[k] = d[k] - y^{post}[k].$$

References

  1. P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
     Implementation*, 5th ed., Algorithm 5.4.

RLSAlt( filter_order: int, delta: float, lamb: float, w_init: Union[numpy.ndarray, list, NoneType] = None, *, safe_eps: float = 1e-12)
113    def __init__(
114        self,
115        filter_order: int,
116        delta: float,
117        lamb: float,
118        w_init: Optional[ArrayLike] = None,
119        *,
120        safe_eps: float = 1e-12,
121    ) -> None:
122        super().__init__(filter_order=int(filter_order), w_init=w_init)
123
124        self.lamb = float(lamb)
125        if not (0.0 < self.lamb <= 1.0):
126            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got lamb={self.lamb}.")
127
128        self.delta = float(delta)
129        if self.delta <= 0.0:
130            raise ValueError(f"delta must be positive. Got delta={self.delta}.")
131
132        self._safe_eps = float(safe_eps)
133
134        n_taps = int(self.filter_order) + 1
135        self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)
supports_complex: bool = True
lamb: float
delta: float
S_d: numpy.ndarray
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
137    @validate_input
138    def optimize(
139        self,
140        input_signal: np.ndarray,
141        desired_signal: np.ndarray,
142        verbose: bool = False,
143        return_internal_states: bool = False,
144    ) -> OptimizationResult:
145        """
146        Executes the RLS-Alt adaptation loop over paired input/desired sequences.
147
148        Parameters
149        ----------
150        input_signal : array_like of complex
151            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
152        desired_signal : array_like of complex
153            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
154        verbose : bool, optional
155            If True, prints the total runtime after completion.
156        return_internal_states : bool, optional
157            If True, includes a posteriori sequences and the last internal states
158            in ``result.extra`` (see below).
159
160        Returns
161        -------
162        OptimizationResult
163            Result object with fields:
164            - outputs : ndarray of complex, shape ``(N,)``
165                A priori output sequence, ``y[k] = w^H[k] x_k``.
166            - errors : ndarray of complex, shape ``(N,)``
167                A priori error sequence, ``e[k] = d[k] - y[k]``.
168            - coefficients : ndarray of complex
169                Coefficient history recorded by the base class.
170            - error_type : str
171                Set to ``"a_priori"``.
172            - extra : dict, optional
173                Present only if ``return_internal_states=True`` with:
174                - ``outputs_posteriori`` : ndarray of complex
175                    A posteriori output sequence, ``y^{post}[k] = w^H[k+1] x_k``.
176                - ``errors_posteriori`` : ndarray of complex
177                    A posteriori error sequence, ``e^{post}[k] = d[k] - y^{post}[k]``.
178                - ``S_d_last`` : ndarray of complex
179                    Final inverse correlation matrix ``S_d``.
180                - ``gain_last`` : ndarray of complex
181                    Last gain vector ``g[k]``.
182        """
183        tic: float = time()
184
185        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
186        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
187
188        n_samples: int = int(d.size)
189
190        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
191        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
192
193        y_post: Optional[np.ndarray] = None
194        e_post: Optional[np.ndarray] = None
195        if return_internal_states:
196            y_post = np.zeros(n_samples, dtype=complex)
197            e_post = np.zeros(n_samples, dtype=complex)
198
199        last_gain: Optional[np.ndarray] = None
200
201        for k in range(n_samples):
202            self.regressor = np.roll(self.regressor, 1)
203            self.regressor[0] = x[k]
204
205            y_k = complex(np.vdot(self.w, self.regressor))
206            e_k = d[k] - y_k
207
208            outputs[k] = y_k
209            errors[k] = e_k
210
211            psi: np.ndarray = self.S_d @ self.regressor
212
213            den: complex = self.lamb + complex(np.vdot(self.regressor, psi))
214            if abs(den) < self._safe_eps:
215                den = den + (self._safe_eps + 0.0j)
216
217            g: np.ndarray = psi / den
218            last_gain = g
219
220            self.w = self.w + np.conj(e_k) * g
221
222            self.S_d = (self.S_d - np.outer(g, np.conj(psi))) / self.lamb
223
224            if return_internal_states:
225                yk_post = complex(np.vdot(self.w, self.regressor))
226                y_post[k] = yk_post
227                e_post[k] = d[k] - yk_post
228
229            self._record_history()
230
231        runtime_s: float = float(time() - tic)
232        if verbose:
233            print(f"[RLSAlt] Completed in {runtime_s * 1000:.03f} ms")
234
235        extra: Optional[Dict[str, Any]] = None
236        if return_internal_states:
237            extra = {
238                "outputs_posteriori": y_post,
239                "errors_posteriori": e_post,
240                "S_d_last": self.S_d.copy(),
241                "gain_last": None if last_gain is None else last_gain.copy(),
242            }
243
244        return self._pack_results(
245            outputs=outputs,
246            errors=errors,
247            runtime_s=runtime_s,
248            error_type="a_priori",
249            extra=extra,
250        )

Executes the RLS-Alt adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of complex
    Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
    Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
    If True, prints the total runtime after completion.
return_internal_states : bool, optional
    If True, includes a posteriori sequences and the last internal states
    in result.extra (see below).

Returns

OptimizationResult
    Result object with fields:

    - outputs : ndarray of complex, shape (N,)
        A priori output sequence, y[k] = w^H[k] x_k.
    - errors : ndarray of complex, shape (N,)
        A priori error sequence, e[k] = d[k] - y[k].
    - coefficients : ndarray of complex
        Coefficient history recorded by the base class.
    - error_type : str
        Set to "a_priori".
    - extra : dict, optional
        Present only if return_internal_states=True, with:

        - outputs_posteriori : ndarray of complex
            A posteriori output sequence, y^{post}[k] = w^H[k+1] x_k.
        - errors_posteriori : ndarray of complex
            A posteriori error sequence, e^{post}[k] = d[k] - y^{post}[k].
        - S_d_last : ndarray of complex
            Final inverse correlation matrix S_d.
        - gain_last : ndarray of complex
            Last gain vector g[k].

class SMNLMS(pydaptivefiltering.AdaptiveFilter):
 25class SMNLMS(AdaptiveFilter):
 26    """
 27    Set-Membership Normalized LMS (SM-NLMS) adaptive filter (complex-valued).
 28
 29    Implements Algorithm 6.1 (Diniz). The coefficients are updated **only** when
 30    the magnitude of the a priori error exceeds a prescribed bound ``gamma_bar``
 31    (set-membership criterion). When an update occurs, a normalized LMS-like
 32    step is applied with an effective step factor that depends on ``|e[k]|``.
 33
 34    Parameters
 35    ----------
 36    filter_order : int
 37        Adaptive FIR filter order ``M`` (number of coefficients is ``M + 1``).
 38    gamma_bar : float
 39        Set-membership bound ``\\bar{\\gamma}`` for the a priori error magnitude.
 40        An update occurs only if ``|e[k]| > gamma_bar``.
 41    gamma : float
 42        Regularization constant used in the NLMS denominator
 43        ``gamma + ||x_k||^2`` to improve numerical stability.
 44    w_init : array_like of complex, optional
 45        Initial coefficient vector ``w(0)``, shape ``(M + 1,)``. If None, zeros.
 46
 47    Notes
 48    -----
 49    Let the tapped-delay regressor be
 50
 51    .. math::
 52        x_k = [x[k], x[k-1], \\dots, x[k-M]]^T \\in \\mathbb{C}^{M+1}.
 53
 54    The a priori output and error are
 55
 56    .. math::
 57        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].
 58
 59    Set-membership condition
 60        If ``|e[k]| \\le \\bar{\\gamma}``, no update is performed.
 61
 62        If ``|e[k]| > \\bar{\\gamma}``, define the SM step factor
 63
 64        .. math::
 65            \\mu[k] = 1 - \\frac{\\bar{\\gamma}}{|e[k]|} \\in (0,1).
 66
 67    Normalized update (as implemented)
 68        With ``\\mathrm{den}[k] = \\gamma + \\|x_k\\|^2``, the coefficient update is
 69
 70        .. math::
 71            w[k+1] = w[k] + \\frac{\\mu[k]}{\\mathrm{den}[k]} \\, e^*[k] \\, x_k.
 72
 73    Returned error type
 74        This implementation reports the **a priori** sequences (computed before
 75        updating ``w``), so ``error_type="a_priori"``.
 76
 77    References
 78    ----------
 79    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 80       Implementation*, Algorithm 6.1.
 81    """
 82
 83    supports_complex: bool = True
 84
 85    gamma_bar: float
 86    gamma: float
 87    n_coeffs: int
 88
 89    def __init__(
 90        self,
 91        filter_order: int,
 92        gamma_bar: float,
 93        gamma: float,
 94        w_init: Optional[Union[np.ndarray, list]] = None,
 95    ) -> None:
 96        super().__init__(filter_order=filter_order, w_init=w_init)
 97        self.gamma_bar = float(gamma_bar)
 98        self.gamma = float(gamma)
 99        self.n_coeffs = int(self.filter_order + 1)
100
101        self.n_updates: int = 0
102
103    @validate_input
104    def optimize(
105        self,
106        input_signal: np.ndarray,
107        desired_signal: np.ndarray,
108        verbose: bool = False,
109        return_internal_states: bool = False,
110    ) -> OptimizationResult:
111        """
112        Executes the SM-NLMS adaptation over paired sequences ``x[k]`` and ``d[k]``.
113
114        Parameters
115        ----------
116        input_signal : array_like of complex
117            Input sequence ``x[k]`` with shape ``(N,)`` (flattened internally).
118        desired_signal : array_like of complex
119            Desired sequence ``d[k]`` with shape ``(N,)`` (flattened internally).
120        verbose : bool, optional
121            If True, prints runtime and update statistics after completion.
122        return_internal_states : bool, optional
123            If True, includes internal trajectories in ``result.extra``:
124            ``mu`` and ``den`` (each length ``N``). Entries are zero when no update occurs.
125
126        Returns
127        -------
128        OptimizationResult
129            Result object with fields:
130            - outputs : ndarray of complex, shape ``(N,)``
131                A priori output sequence ``y[k] = w^H[k] x_k``.
132            - errors : ndarray of complex, shape ``(N,)``
133                A priori error sequence ``e[k] = d[k] - y[k]``.
134            - coefficients : ndarray of complex
135                Coefficient history recorded by the base class.
136            - error_type : str
137                Set to ``"a_priori"``.
138            - extra : dict
139                Always present with:
140                - ``"n_updates"`` : int
141                    Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``).
142                - ``"update_mask"`` : ndarray of bool, shape ``(N,)``
143                    Boolean mask indicating which iterations performed updates.
144                Additionally present only if ``return_internal_states=True``:
145                - ``"mu"`` : ndarray of float, shape ``(N,)``
146                    Step factor ``mu[k]`` (0 when no update).
147                - ``"den"`` : ndarray of float, shape ``(N,)``
148                    Denominator ``gamma + ||x_k||^2`` (0 when no update).
149        """
150        tic: float = time()
151
152        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
153        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
154
155        n_samples: int = int(x.size)
156
157        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
158        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
159
160        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
161
162        mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
163        den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
164
165        self.n_updates = 0
166
167        self.regressor = np.asarray(self.regressor, dtype=complex)
168        if self.regressor.size != self.n_coeffs:
169            self.regressor = np.zeros(self.n_coeffs, dtype=complex)
170
171        for k in range(n_samples):
172            self.regressor = np.roll(self.regressor, 1)
173            self.regressor[0] = x[k]
174
175            yk: complex = complex(np.dot(self.w.conj(), self.regressor))
176            ek: complex = complex(d[k] - yk)
177
178            outputs[k] = yk
179            errors[k] = ek
180
181            eabs: float = float(np.abs(ek))
182
183            if eabs > self.gamma_bar:
184                self.n_updates += 1
185                update_mask[k] = True
186
187                mu: float = float(1.0 - (self.gamma_bar / eabs))
188
189                norm_sq: float = float(np.real(np.dot(self.regressor.conj(), self.regressor)))
190                den: float = float(self.gamma + norm_sq)
191
192                if den <= 0.0:
193                    den = float(self.gamma + 1e-30)
194
195                self.w = self.w + (mu / den) * (np.conj(ek) * self.regressor)
196
197                if return_internal_states:
198                    if mu_track is not None:
199                        mu_track[k] = mu
200                    if den_track is not None:
201                        den_track[k] = den
202            else:
203                if return_internal_states and mu_track is not None:
204                    mu_track[k] = 0.0
205
206            self._record_history()
207
208        runtime_s: float = float(time() - tic)
209        if verbose:
210            pct = (100.0 * self.n_updates / n_samples) if n_samples > 0 else 0.0
211            print(f"[SM-NLMS] Updates: {self.n_updates}/{n_samples} ({pct:.1f}%) | Runtime: {runtime_s * 1000:.03f} ms")
212
213        extra: Dict[str, Any] = {
214            "n_updates": int(self.n_updates),
215            "update_mask": update_mask,
216        }
217        if return_internal_states:
218            extra.update(
219                {
220                    "mu": mu_track,
221                    "den": den_track,
222                }
223            )
224
225        return self._pack_results(
226            outputs=outputs,
227            errors=errors,
228            runtime_s=runtime_s,
229            error_type="a_priori",
230            extra=extra,
231        )

Set-Membership Normalized LMS (SM-NLMS) adaptive filter (complex-valued).

Implements Algorithm 6.1 (Diniz). The coefficients are updated only when the magnitude of the a priori error exceeds a prescribed bound gamma_bar (set-membership criterion). When an update occurs, a normalized LMS-like step is applied with an effective step factor that depends on |e[k]|.

Parameters

filter_order : int — Adaptive FIR filter order M (number of coefficients is M + 1).
gamma_bar : float — Set-membership bound \bar{\gamma} for the a priori error magnitude. An update occurs only if |e[k]| > gamma_bar.
gamma : float — Regularization constant used in the NLMS denominator gamma + ||x_k||^2 to improve numerical stability.
w_init : array_like of complex, optional — Initial coefficient vector w(0), shape (M + 1,). If None, zeros.

Notes

Let the tapped-delay regressor be

$$x_k = [x[k], x[k-1], \dots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$

The a priori output and error are

$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$

Set-membership condition: if $|e[k]| \le \bar{\gamma}$, no update is performed.

If $|e[k]| > \bar{\gamma}$, define the SM step factor

$$\mu[k] = 1 - \frac{\bar{\gamma}}{|e[k]|} \in (0,1).$$

Normalized update (as implemented): with $\mathrm{den}[k] = \gamma + \|x_k\|^2$, the coefficient update is

$$w[k+1] = w[k] + \frac{\mu[k]}{\mathrm{den}[k]} \, e^*[k] \, x_k.$$

Returned error type: this implementation reports the a priori sequences (computed before updating w), so error_type="a_priori".

References

[1] P. S. R. Diniz, Adaptive Filtering: Algorithms and Practical Implementation, Algorithm 6.1.

SMNLMS( filter_order: int, gamma_bar: float, gamma: float, w_init: Union[numpy.ndarray, list, NoneType] = None)
 89    def __init__(
 90        self,
 91        filter_order: int,
 92        gamma_bar: float,
 93        gamma: float,
 94        w_init: Optional[Union[np.ndarray, list]] = None,
 95    ) -> None:
 96        super().__init__(filter_order=filter_order, w_init=w_init)
 97        self.gamma_bar = float(gamma_bar)
 98        self.gamma = float(gamma)
 99        self.n_coeffs = int(self.filter_order + 1)
100
101        self.n_updates: int = 0
supports_complex: bool = True
gamma_bar: float
gamma: float
n_coeffs: int
n_updates: int
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
103    @validate_input
104    def optimize(
105        self,
106        input_signal: np.ndarray,
107        desired_signal: np.ndarray,
108        verbose: bool = False,
109        return_internal_states: bool = False,
110    ) -> OptimizationResult:
111        """
112        Executes the SM-NLMS adaptation over paired sequences ``x[k]`` and ``d[k]``.
113
114        Parameters
115        ----------
116        input_signal : array_like of complex
117            Input sequence ``x[k]`` with shape ``(N,)`` (flattened internally).
118        desired_signal : array_like of complex
119            Desired sequence ``d[k]`` with shape ``(N,)`` (flattened internally).
120        verbose : bool, optional
121            If True, prints runtime and update statistics after completion.
122        return_internal_states : bool, optional
123            If True, includes internal trajectories in ``result.extra``:
124            ``mu`` and ``den`` (each length ``N``). Entries are zero when no update occurs.
125
126        Returns
127        -------
128        OptimizationResult
129            Result object with fields:
130            - outputs : ndarray of complex, shape ``(N,)``
131                A priori output sequence ``y[k] = w^H[k] x_k``.
132            - errors : ndarray of complex, shape ``(N,)``
133                A priori error sequence ``e[k] = d[k] - y[k]``.
134            - coefficients : ndarray of complex
135                Coefficient history recorded by the base class.
136            - error_type : str
137                Set to ``"a_priori"``.
138            - extra : dict
139                Always present with:
140                - ``"n_updates"`` : int
141                    Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``).
142                - ``"update_mask"`` : ndarray of bool, shape ``(N,)``
143                    Boolean mask indicating which iterations performed updates.
144                Additionally present only if ``return_internal_states=True``:
145                - ``"mu"`` : ndarray of float, shape ``(N,)``
146                    Step factor ``mu[k]`` (0 when no update).
147                - ``"den"`` : ndarray of float, shape ``(N,)``
148                    Denominator ``gamma + ||x_k||^2`` (0 when no update).
149        """
150        tic: float = time()
151
152        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
153        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
154
155        n_samples: int = int(x.size)
156
157        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
158        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
159
160        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
161
162        mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
163        den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
164
165        self.n_updates = 0
166
167        self.regressor = np.asarray(self.regressor, dtype=complex)
168        if self.regressor.size != self.n_coeffs:
169            self.regressor = np.zeros(self.n_coeffs, dtype=complex)
170
171        for k in range(n_samples):
172            self.regressor = np.roll(self.regressor, 1)
173            self.regressor[0] = x[k]
174
175            yk: complex = complex(np.dot(self.w.conj(), self.regressor))
176            ek: complex = complex(d[k] - yk)
177
178            outputs[k] = yk
179            errors[k] = ek
180
181            eabs: float = float(np.abs(ek))
182
183            if eabs > self.gamma_bar:
184                self.n_updates += 1
185                update_mask[k] = True
186
187                mu: float = float(1.0 - (self.gamma_bar / eabs))
188
189                norm_sq: float = float(np.real(np.dot(self.regressor.conj(), self.regressor)))
190                den: float = float(self.gamma + norm_sq)
191
192                if den <= 0.0:
193                    den = float(self.gamma + 1e-30)
194
195                self.w = self.w + (mu / den) * (np.conj(ek) * self.regressor)
196
197                if return_internal_states:
198                    if mu_track is not None:
199                        mu_track[k] = mu
200                    if den_track is not None:
201                        den_track[k] = den
202            else:
203                if return_internal_states and mu_track is not None:
204                    mu_track[k] = 0.0
205
206            self._record_history()
207
208        runtime_s: float = float(time() - tic)
209        if verbose:
210            pct = (100.0 * self.n_updates / n_samples) if n_samples > 0 else 0.0
211            print(f"[SM-NLMS] Updates: {self.n_updates}/{n_samples} ({pct:.1f}%) | Runtime: {runtime_s * 1000:.03f} ms")
212
213        extra: Dict[str, Any] = {
214            "n_updates": int(self.n_updates),
215            "update_mask": update_mask,
216        }
217        if return_internal_states:
218            extra.update(
219                {
220                    "mu": mu_track,
221                    "den": den_track,
222                }
223            )
224
225        return self._pack_results(
226            outputs=outputs,
227            errors=errors,
228            runtime_s=runtime_s,
229            error_type="a_priori",
230            extra=extra,
231        )

Executes the SM-NLMS adaptation over paired sequences x[k] and d[k].

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (flattened internally). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (flattened internally). verbose : bool, optional If True, prints runtime and update statistics after completion. return_internal_states : bool, optional If True, includes internal trajectories in result.extra: mu and den (each length N). Entries are zero when no update occurs.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) A priori output sequence y[k] = w^H[k] x_k. - errors : ndarray of complex, shape (N,) A priori error sequence e[k] = d[k] - y[k]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict Always present with: - "n_updates" : int Number of coefficient updates (iterations where |e[k]| > gamma_bar). - "update_mask" : ndarray of bool, shape (N,) Boolean mask indicating which iterations performed updates. Additionally present only if return_internal_states=True: - "mu" : ndarray of float, shape (N,) Step factor mu[k] (0 when no update). - "den" : ndarray of float, shape (N,) Denominator gamma + ||x_k||^2 (0 when no update).

class SMBNLMS(pydaptivefiltering.AdaptiveFilter):
 25class SMBNLMS(AdaptiveFilter):
 26    """
 27    Set-Membership Binormalized LMS (SM-BNLMS) adaptive filter (complex-valued).
 28
 29    Implements Algorithm 6.5 (Diniz). This method can be viewed as a particular
 30    set-membership affine-projection (SM-AP) case with projection order ``L = 1``,
 31    i.e., it reuses the current and previous regressors to build a low-cost
 32    two-vector update.
 33
 34    The filter updates **only** when the magnitude of the a priori error exceeds
 35    a prescribed bound ``gamma_bar`` (set-membership criterion).
 36
 37    Parameters
 38    ----------
 39    filter_order : int
 40        Adaptive FIR filter order ``M`` (number of coefficients is ``M + 1``).
 41    gamma_bar : float
 42        Set-membership bound ``\\bar{\\gamma}`` for the a priori error magnitude.
 43        An update occurs only if ``|e[k]| > gamma_bar``.
 44    gamma : float
 45        Regularization factor used in the binormalized denominator. It must be
 46        positive (or at least nonnegative) to improve numerical robustness.
 47    w_init : array_like of complex, optional
 48        Initial coefficient vector ``w(0)``, shape ``(M + 1,)``. If None, zeros.
 49
 50    Notes
 51    -----
 52    Let the tapped-delay regressor be
 53
 54    .. math::
 55        x_k = [x[k], x[k-1], \\dots, x[k-M]]^T \\in \\mathbb{C}^{M+1}
 56
 57    and the previous regressor be ``x_{k-1}`` (as stored by the implementation).
 58    The a priori output and error are
 59
 60    .. math::
 61        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].
 62
 63    Set-membership condition
 64        If ``|e[k]| \\le \\bar{\\gamma}``, no update is performed.
 65
 66        If ``|e[k]| > \\bar{\\gamma}``, define the SM step factor
 67
 68        .. math::
 69            \\mu[k] = 1 - \\frac{\\bar{\\gamma}}{|e[k]|} \\in (0,1).
 70
 71    Binormalized denominator
 72        Define
 73
 74        .. math::
 75            a = \\|x_k\\|^2, \\quad b = \\|x_{k-1}\\|^2, \\quad c = x_{k-1}^H x_k,
 76
 77        and
 78
 79        .. math::
 80            \\mathrm{den}[k] = \\gamma + a b - |c|^2.
 81
 82        (The code enforces a small positive floor if ``den`` becomes nonpositive.)
 83
 84    Update (as implemented)
 85        The update uses two complex scalars ``\\lambda_1`` and ``\\lambda_2``:
 86
 87        .. math::
 88            \\lambda_1[k] = \\frac{\\mu[k]\\, e[k] \\, \\|x_{k-1}\\|^2}{\\mathrm{den}[k]}, \\qquad
 89            \\lambda_2[k] = -\\frac{\\mu[k]\\, e[k] \\, c^*}{\\mathrm{den}[k]}.
 90
 91        Then the coefficients are updated by
 92
 93        .. math::
 94            w[k+1] = w[k] + \\lambda_1^*[k] x_k + \\lambda_2^*[k] x_{k-1}.
 95
 96    Returned error type
 97        This implementation reports the **a priori** sequences (computed before
 98        updating ``w``), so ``error_type="a_priori"``.
 99
100    References
101    ----------
102    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
103       Implementation*, Algorithm 6.5.
104    """
105    supports_complex: bool = True
106
107    gamma_bar: float
108    gamma: float
109    n_coeffs: int
110
111    def __init__(
112        self,
113        filter_order: int,
114        gamma_bar: float,
115        gamma: float,
116        w_init: Optional[Union[np.ndarray, list]] = None,
117    ) -> None:
118        super().__init__(filter_order=filter_order, w_init=w_init)
119
120        self.gamma_bar = float(gamma_bar)
121        self.gamma = float(gamma)
122        self.n_coeffs = int(self.filter_order + 1)
123
124        self.regressor_prev: np.ndarray = np.zeros(self.n_coeffs, dtype=complex)
125
126        self.n_updates: int = 0
127
128    @validate_input
129    def optimize(
130        self,
131        input_signal: np.ndarray,
132        desired_signal: np.ndarray,
133        verbose: bool = False,
134        return_internal_states: bool = False,
135    ) -> OptimizationResult:
136        """
137        Executes the SM-BNLMS adaptation over paired sequences ``x[k]`` and ``d[k]``.
138
139        Parameters
140        ----------
141        input_signal : array_like of complex
142            Input sequence ``x[k]`` with shape ``(N,)`` (flattened internally).
143        desired_signal : array_like of complex
144            Desired sequence ``d[k]`` with shape ``(N,)`` (flattened internally).
145        verbose : bool, optional
146            If True, prints runtime and update count after completion.
147        return_internal_states : bool, optional
148            If True, includes internal trajectories in ``result.extra``:
149            ``mu``, ``den``, ``lambda1``, ``lambda2`` (each length ``N``). Entries
150            are zero when no update occurs.
151
152        Returns
153        -------
154        OptimizationResult
155            Result object with fields:
156            - outputs : ndarray of complex, shape ``(N,)``
157                A priori output sequence ``y[k] = w^H[k] x_k``.
158            - errors : ndarray of complex, shape ``(N,)``
159                A priori error sequence ``e[k] = d[k] - y[k]``.
160            - coefficients : ndarray of complex
161                Coefficient history recorded by the base class.
162            - error_type : str
163                Set to ``"a_priori"``.
164            - extra : dict
165                Always present with:
166                - ``"n_updates"`` : int
167                    Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``).
168                - ``"update_mask"`` : ndarray of bool, shape ``(N,)``
169                    Boolean mask indicating which iterations performed updates.
170                Additionally present only if ``return_internal_states=True``:
171                - ``"mu"`` : ndarray of float, shape ``(N,)``
172                    Step factor ``mu[k]`` (0 when no update).
173                - ``"den"`` : ndarray of float, shape ``(N,)``
174                    Denominator used in ``lambda1/lambda2`` (0 when no update).
175                - ``"lambda1"`` : ndarray of complex, shape ``(N,)``
176                    ``lambda1[k]`` (0 when no update).
177                - ``"lambda2"`` : ndarray of complex, shape ``(N,)``
178                    ``lambda2[k]`` (0 when no update).
179        """
180        tic: float = time()
181
182        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
183        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
184
185        n_samples: int = int(x.size)
186        n_coeffs: int = int(self.n_coeffs)
187
188        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
189        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
190
191        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
192
193        mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
194        den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
195        lam1_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
196        lam2_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
197
198        self.n_updates = 0
199
200        self.regressor = np.asarray(self.regressor, dtype=complex)
201        if self.regressor.size != n_coeffs:
202            self.regressor = np.zeros(n_coeffs, dtype=complex)
203
204        self.regressor_prev = np.asarray(self.regressor_prev, dtype=complex)
205        if self.regressor_prev.size != n_coeffs:
206            self.regressor_prev = np.zeros(n_coeffs, dtype=complex)
207
208        for k in range(n_samples):
209            self.regressor_prev = self.regressor.copy()
210
211            self.regressor = np.roll(self.regressor, 1)
212            self.regressor[0] = x[k]
213
214            yk: complex = complex(np.dot(self.w.conj(), self.regressor))
215            ek: complex = complex(d[k] - yk)
216
217            outputs[k] = yk
218            errors[k] = ek
219
220            eabs: float = float(np.abs(ek))
221
222            if eabs > self.gamma_bar:
223                self.n_updates += 1
224                update_mask[k] = True
225
226                mu: float = float(1.0 - (self.gamma_bar / eabs))
227
228                norm_sq: float = float(np.real(np.dot(self.regressor.conj(), self.regressor)))
229                prev_norm_sq: float = float(np.real(np.dot(self.regressor_prev.conj(), self.regressor_prev)))
230                cross_term: complex = complex(np.dot(self.regressor_prev.conj(), self.regressor))
231
232                den: float = float(self.gamma + (norm_sq * prev_norm_sq) - (np.abs(cross_term) ** 2))
233
234                if den <= 0.0:
235                    den = float(self.gamma + 1e-30)
236
237                lambda1: complex = complex((mu * ek * prev_norm_sq) / den)
238                lambda2: complex = complex(-(mu * ek * np.conj(cross_term)) / den)
239
240                self.w = self.w + (np.conj(lambda1) * self.regressor) + (np.conj(lambda2) * self.regressor_prev)
241
242                if return_internal_states:
243                    if mu_track is not None:
244                        mu_track[k] = mu
245                    if den_track is not None:
246                        den_track[k] = den
247                    if lam1_track is not None:
248                        lam1_track[k] = lambda1
249                    if lam2_track is not None:
250                        lam2_track[k] = lambda2
251            else:
252                if return_internal_states and mu_track is not None:
253                    mu_track[k] = 0.0
254
255            self._record_history()
256
257        runtime_s: float = float(time() - tic)
258        if verbose:
259            print(f"[SM-BNLMS] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.03f} ms")
260
261        extra: Dict[str, Any] = {
262            "n_updates": int(self.n_updates),
263            "update_mask": update_mask,
264        }
265        if return_internal_states:
266            extra.update(
267                {
268                    "mu": mu_track,
269                    "den": den_track,
270                    "lambda1": lam1_track,
271                    "lambda2": lam2_track,
272                }
273            )
274
275        return self._pack_results(
276            outputs=outputs,
277            errors=errors,
278            runtime_s=runtime_s,
279            error_type="a_priori",
280            extra=extra,
281        )

Set-Membership Binormalized LMS (SM-BNLMS) adaptive filter (complex-valued).

Implements Algorithm 6.5 (Diniz). This method can be viewed as a particular set-membership affine-projection (SM-AP) case with projection order L = 1, i.e., it reuses the current and previous regressors to build a low-cost two-vector update.

The filter updates only when the magnitude of the a priori error exceeds a prescribed bound gamma_bar (set-membership criterion).

Parameters

filter_order : int — Adaptive FIR filter order M (number of coefficients is M + 1).
gamma_bar : float — Set-membership bound \bar{\gamma} for the a priori error magnitude. An update occurs only if |e[k]| > gamma_bar.
gamma : float — Regularization factor used in the binormalized denominator. It must be positive (or at least nonnegative) to improve numerical robustness.
w_init : array_like of complex, optional — Initial coefficient vector w(0), shape (M + 1,). If None, zeros.

Notes

Let the tapped-delay regressor be

$$x_k = [x[k], x[k-1], \dots, x[k-M]]^T \in \mathbb{C}^{M+1}$$

and the previous regressor be x_{k-1} (as stored by the implementation). The a priori output and error are

$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$

Set-membership condition: if $|e[k]| \le \bar{\gamma}$, no update is performed.

If $|e[k]| > \bar{\gamma}$, define the SM step factor

$$\mu[k] = 1 - \frac{\bar{\gamma}}{|e[k]|} \in (0,1).$$

Binormalized denominator: define

$$a = \|x_k\|^2, \quad b = \|x_{k-1}\|^2, \quad c = x_{k-1}^H x_k,$$

and

$$\mathrm{den}[k] = \gamma + a b - |c|^2.$$

(The code enforces a small positive floor if den becomes nonpositive.)

Update (as implemented): the update uses two complex scalars $\lambda_1$ and $\lambda_2$:

$$\lambda_1[k] = \frac{\mu[k]\, e[k] \, \|x_{k-1}\|^2}{\mathrm{den}[k]}, \qquad \lambda_2[k] = -\frac{\mu[k]\, e[k] \, c^*}{\mathrm{den}[k]}.$$

Then the coefficients are updated by

$$w[k+1] = w[k] + \lambda_1^*[k] x_k + \lambda_2^*[k] x_{k-1}.$$

Returned error type: this implementation reports the a priori sequences (computed before updating w), so error_type="a_priori".

References

[1] P. S. R. Diniz, Adaptive Filtering: Algorithms and Practical Implementation, Algorithm 6.5.

SMBNLMS( filter_order: int, gamma_bar: float, gamma: float, w_init: Union[numpy.ndarray, list, NoneType] = None)
111    def __init__(
112        self,
113        filter_order: int,
114        gamma_bar: float,
115        gamma: float,
116        w_init: Optional[Union[np.ndarray, list]] = None,
117    ) -> None:
118        super().__init__(filter_order=filter_order, w_init=w_init)
119
120        self.gamma_bar = float(gamma_bar)
121        self.gamma = float(gamma)
122        self.n_coeffs = int(self.filter_order + 1)
123
124        self.regressor_prev: np.ndarray = np.zeros(self.n_coeffs, dtype=complex)
125
126        self.n_updates: int = 0
supports_complex: bool = True
gamma_bar: float
gamma: float
n_coeffs: int
regressor_prev: numpy.ndarray
n_updates: int
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
128    @validate_input
129    def optimize(
130        self,
131        input_signal: np.ndarray,
132        desired_signal: np.ndarray,
133        verbose: bool = False,
134        return_internal_states: bool = False,
135    ) -> OptimizationResult:
136        """
137        Executes the SM-BNLMS adaptation over paired sequences ``x[k]`` and ``d[k]``.
138
139        Parameters
140        ----------
141        input_signal : array_like of complex
142            Input sequence ``x[k]`` with shape ``(N,)`` (flattened internally).
143        desired_signal : array_like of complex
144            Desired sequence ``d[k]`` with shape ``(N,)`` (flattened internally).
145        verbose : bool, optional
146            If True, prints runtime and update count after completion.
147        return_internal_states : bool, optional
148            If True, includes internal trajectories in ``result.extra``:
149            ``mu``, ``den``, ``lambda1``, ``lambda2`` (each length ``N``). Entries
150            are zero when no update occurs.
151
152        Returns
153        -------
154        OptimizationResult
155            Result object with fields:
156            - outputs : ndarray of complex, shape ``(N,)``
157                A priori output sequence ``y[k] = w^H[k] x_k``.
158            - errors : ndarray of complex, shape ``(N,)``
159                A priori error sequence ``e[k] = d[k] - y[k]``.
160            - coefficients : ndarray of complex
161                Coefficient history recorded by the base class.
162            - error_type : str
163                Set to ``"a_priori"``.
164            - extra : dict
165                Always present with:
166                - ``"n_updates"`` : int
167                    Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``).
168                - ``"update_mask"`` : ndarray of bool, shape ``(N,)``
169                    Boolean mask indicating which iterations performed updates.
170                Additionally present only if ``return_internal_states=True``:
171                - ``"mu"`` : ndarray of float, shape ``(N,)``
172                    Step factor ``mu[k]`` (0 when no update).
173                - ``"den"`` : ndarray of float, shape ``(N,)``
174                    Denominator used in ``lambda1/lambda2`` (0 when no update).
175                - ``"lambda1"`` : ndarray of complex, shape ``(N,)``
176                    ``lambda1[k]`` (0 when no update).
177                - ``"lambda2"`` : ndarray of complex, shape ``(N,)``
178                    ``lambda2[k]`` (0 when no update).
179        """
180        tic: float = time()
181
182        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
183        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
184
185        n_samples: int = int(x.size)
186        n_coeffs: int = int(self.n_coeffs)
187
188        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
189        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
190
191        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
192
193        mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
194        den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
195        lam1_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
196        lam2_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
197
198        self.n_updates = 0
199
200        self.regressor = np.asarray(self.regressor, dtype=complex)
201        if self.regressor.size != n_coeffs:
202            self.regressor = np.zeros(n_coeffs, dtype=complex)
203
204        self.regressor_prev = np.asarray(self.regressor_prev, dtype=complex)
205        if self.regressor_prev.size != n_coeffs:
206            self.regressor_prev = np.zeros(n_coeffs, dtype=complex)
207
208        for k in range(n_samples):
209            self.regressor_prev = self.regressor.copy()
210
211            self.regressor = np.roll(self.regressor, 1)
212            self.regressor[0] = x[k]
213
214            yk: complex = complex(np.dot(self.w.conj(), self.regressor))
215            ek: complex = complex(d[k] - yk)
216
217            outputs[k] = yk
218            errors[k] = ek
219
220            eabs: float = float(np.abs(ek))
221
222            if eabs > self.gamma_bar:
223                self.n_updates += 1
224                update_mask[k] = True
225
226                mu: float = float(1.0 - (self.gamma_bar / eabs))
227
228                norm_sq: float = float(np.real(np.dot(self.regressor.conj(), self.regressor)))
229                prev_norm_sq: float = float(np.real(np.dot(self.regressor_prev.conj(), self.regressor_prev)))
230                cross_term: complex = complex(np.dot(self.regressor_prev.conj(), self.regressor))
231
232                den: float = float(self.gamma + (norm_sq * prev_norm_sq) - (np.abs(cross_term) ** 2))
233
234                if den <= 0.0:
235                    den = float(self.gamma + 1e-30)
236
237                lambda1: complex = complex((mu * ek * prev_norm_sq) / den)
238                lambda2: complex = complex(-(mu * ek * np.conj(cross_term)) / den)
239
240                self.w = self.w + (np.conj(lambda1) * self.regressor) + (np.conj(lambda2) * self.regressor_prev)
241
242                if return_internal_states:
243                    if mu_track is not None:
244                        mu_track[k] = mu
245                    if den_track is not None:
246                        den_track[k] = den
247                    if lam1_track is not None:
248                        lam1_track[k] = lambda1
249                    if lam2_track is not None:
250                        lam2_track[k] = lambda2
251            else:
252                if return_internal_states and mu_track is not None:
253                    mu_track[k] = 0.0
254
255            self._record_history()
256
257        runtime_s: float = float(time() - tic)
258        if verbose:
259            print(f"[SM-BNLMS] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.03f} ms")
260
261        extra: Dict[str, Any] = {
262            "n_updates": int(self.n_updates),
263            "update_mask": update_mask,
264        }
265        if return_internal_states:
266            extra.update(
267                {
268                    "mu": mu_track,
269                    "den": den_track,
270                    "lambda1": lam1_track,
271                    "lambda2": lam2_track,
272                }
273            )
274
275        return self._pack_results(
276            outputs=outputs,
277            errors=errors,
278            runtime_s=runtime_s,
279            error_type="a_priori",
280            extra=extra,
281        )

Executes the SM-BNLMS adaptation over paired sequences x[k] and d[k].

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (flattened internally). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (flattened internally). verbose : bool, optional If True, prints runtime and update count after completion. return_internal_states : bool, optional If True, includes internal trajectories in result.extra: mu, den, lambda1, lambda2 (each length N). Entries are zero when no update occurs.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) A priori output sequence y[k] = w^H[k] x_k. - errors : ndarray of complex, shape (N,) A priori error sequence e[k] = d[k] - y[k]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict Always present with: - "n_updates" : int Number of coefficient updates (iterations where |e[k]| > gamma_bar). - "update_mask" : ndarray of bool, shape (N,) Boolean mask indicating which iterations performed updates. Additionally present only if return_internal_states=True: - "mu" : ndarray of float, shape (N,) Step factor mu[k] (0 when no update). - "den" : ndarray of float, shape (N,) Denominator used in lambda1/lambda2 (0 when no update). - "lambda1" : ndarray of complex, shape (N,) lambda1[k] (0 when no update). - "lambda2" : ndarray of complex, shape (N,) lambda2[k] (0 when no update).

class SMAffineProjection(pydaptivefiltering.AdaptiveFilter):
 26class SMAffineProjection(AdaptiveFilter):
 27    """
 28    Set-Membership Affine-Projection (SM-AP) adaptive filter (complex-valued).
 29
 30    Supervised affine-projection algorithm with *set-membership* updating,
 31    following Diniz (Alg. 6.2). Coefficients are updated **only** when the
 32    magnitude of the most-recent a priori error exceeds a prescribed bound
 33    ``gamma_bar``. When an update occurs, the algorithm enforces a target
 34    a posteriori error vector (provided by ``gamma_bar_vector``).
 35
 36    Parameters
 37    ----------
 38    filter_order : int
 39        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 40    gamma_bar : float
 41        Set-membership bound for the (most recent) a priori error magnitude.
 42        An update is performed only if ``|e[k]| > gamma_bar``.
 43    gamma_bar_vector : array_like of complex
 44        Target a posteriori error vector with shape ``(L + 1,)`` (stored
 45        internally as a column vector). This is algorithm-dependent and
 46        corresponds to the desired post-update constraint in Alg. 6.2.
 47    gamma : float
 48        Regularization factor ``gamma`` used in the affine-projection normal
 49        equations to improve numerical stability.
 50    L : int
 51        Data reuse factor (projection order). The affine-projection block size is
 52        ``P = L + 1``.
 53    w_init : array_like of complex, optional
 54        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 55        initializes with zeros.
 56
 57    Notes
 58    -----
 59    At iteration ``k``, form the regressor block matrix:
 60
 61    - ``X(k) ∈ C^{(M+1) x (L+1)}``, whose columns are the most recent regressor
 62      vectors (newest in column 0).
 63
 64    The affine-projection output vector is:
 65
 66    .. math::
 67        y_{ap}(k) = X^H(k) w(k) \\in \\mathbb{C}^{L+1}.
 68
 69    Let the stacked desired vector be:
 70
 71    .. math::
 72        d_{ap}(k) \\in \\mathbb{C}^{L+1},
 73
 74    with newest sample at index 0. The a priori error vector is:
 75
 76    .. math::
 77        e_{ap}(k) = d_{ap}(k) - y_{ap}(k).
 78
 79    This implementation uses the *most recent* scalar component as the reported
 80    output and error:
 81
 82    .. math::
 83        y[k] = y_{ap}(k)[0], \\qquad e[k] = e_{ap}(k)[0].
 84
 85    Set-membership update rule
 86        Update **only if**:
 87
 88        .. math::
 89            |e[k]| > \\bar{\\gamma}.
 90
 91        When updating, solve the regularized system:
 92
 93        .. math::
 94            (X^H(k)X(k) + \\gamma I_{L+1})\\, s(k) =
 95            \\bigl(e_{ap}(k) - \\bar{\\gamma}_{vec}^*(k)\\bigr),
 96
 97        and update the coefficients as:
 98
 99        .. math::
100            w(k+1) = w(k) + X(k)\\, s(k).
101
102        Here ``\\bar{\\gamma}_{vec}`` is provided by ``gamma_bar_vector`` (stored
103        as a column vector); complex conjugation is applied to match the internal
104        conjugate-domain formulation used in the implementation.
105
106    References
107    ----------
108    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
109       Implementation*, 5th ed., Algorithm 6.2.
110    """
111    supports_complex: bool = True
112
113    gamma_bar: float
114    gamma_bar_vector: np.ndarray
115    gamma: float
116    L: int
117    n_coeffs: int
118
119    def __init__(
120        self,
121        filter_order: int,
122        gamma_bar: float,
123        gamma_bar_vector: Union[np.ndarray, list],
124        gamma: float,
125        L: int,
126        w_init: Optional[Union[np.ndarray, list]] = None,
127    ) -> None:
128        super().__init__(filter_order=filter_order, w_init=w_init)
129
130        self.gamma_bar = float(gamma_bar)
131        self.gamma = float(gamma)
132        self.L = int(L)
133
134        self.n_coeffs = int(self.filter_order + 1)
135
136        gvec = np.asarray(gamma_bar_vector, dtype=complex).ravel()
137        if gvec.size != (self.L + 1):
138            raise ValueError(
139                f"gamma_bar_vector must have size L+1 = {self.L + 1}, got {gvec.size}"
140            )
141        self.gamma_bar_vector = gvec.reshape(-1, 1)
142
143        self.regressor_matrix = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)
144
145        self.n_updates: int = 0
146
147    @validate_input
148    def optimize(
149        self,
150        input_signal: np.ndarray,
151        desired_signal: np.ndarray,
152        verbose: bool = False,
153        return_internal_states: bool = False,
154    ) -> OptimizationResult:
155        """
156        Executes the SM-AP adaptation loop over paired input/desired sequences.
157
158        Parameters
159        ----------
160        input_signal : array_like of complex
161            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
162        desired_signal : array_like of complex
163            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
164        verbose : bool, optional
165            If True, prints total runtime and update count after completion.
166        return_internal_states : bool, optional
167            If True, includes the full a priori AP error-vector trajectory in
168            ``result.extra`` as ``"errors_vector"`` with shape ``(N, L + 1)``.
169
170        Returns
171        -------
172        OptimizationResult
173            Result object with fields:
174            - outputs : ndarray of complex, shape ``(N,)``
175                Scalar a priori output sequence, ``y[k] = y_{ap}(k)[0]``.
176            - errors : ndarray of complex, shape ``(N,)``
177                Scalar a priori error sequence, ``e[k] = e_{ap}(k)[0]``.
178            - coefficients : ndarray of complex
179                Coefficient history recorded by the base class.
180            - error_type : str
181                Set to ``"a_priori"``.
182            - extra : dict
183                Always present with:
184                - ``"n_updates"`` : int
185                    Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``).
186                - ``"update_mask"`` : ndarray of bool, shape ``(N,)``
187                    Boolean mask indicating which iterations performed updates.
188                Additionally present only if ``return_internal_states=True``:
189                - ``"errors_vector"`` : ndarray of complex, shape ``(N, L + 1)``
190                    Full affine-projection a priori error vectors over time.
191        """
192        tic: float = time()
193
194        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
195        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
196
197        n_samples: int = int(d.size)
198        n_coeffs: int = int(self.n_coeffs)
199        Lp1: int = int(self.L + 1)
200
201        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
202        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
203        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
204
205        errors_vec_track: Optional[np.ndarray] = (
206            np.zeros((n_samples, Lp1), dtype=complex) if return_internal_states else None
207        )
208
209        self.n_updates = 0
210        w_current: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1)
211
212        prefixed_input: np.ndarray = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x])
213        prefixed_desired: np.ndarray = np.concatenate([np.zeros(self.L, dtype=complex), d])
214
215        for k in range(n_samples):
216            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
217
218            start_idx = k + n_coeffs - 1
219            stop = (k - 1) if (k > 0) else None
220            self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1]
221
222            output_ap_conj = (self.regressor_matrix.conj().T) @ w_current
223
224            desired_slice = prefixed_desired[k + self.L : stop : -1]
225            error_ap_conj = desired_slice.conj().reshape(-1, 1) - output_ap_conj
226
227            yk = output_ap_conj[0, 0]
228            ek = error_ap_conj[0, 0]
229
230            outputs[k] = yk
231            errors[k] = ek
232            if return_internal_states and errors_vec_track is not None:
233                errors_vec_track[k, :] = error_ap_conj.ravel()
234
235            if np.abs(ek) > self.gamma_bar:
236                self.n_updates += 1
237                update_mask[k] = True
238
239                R = (self.regressor_matrix.conj().T @ self.regressor_matrix) + self.gamma * np.eye(Lp1)
240                b = error_ap_conj - self.gamma_bar_vector.conj()
241
242                try:
243                    step = np.linalg.solve(R, b)
244                except np.linalg.LinAlgError:
245                    step = np.linalg.pinv(R) @ b
246
247                w_current = w_current + (self.regressor_matrix @ step)
248
249            self.w = w_current.ravel()
250            self._record_history()
251
252        runtime_s: float = float(time() - tic)
253        if verbose:
254            print(f"[SM-AP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.02f} ms")
255
256        extra: Dict[str, Any] = {
257            "n_updates": int(self.n_updates),
258            "update_mask": update_mask,
259        }
260        if return_internal_states:
261            extra["errors_vector"] = errors_vec_track
262
263        return self._pack_results(
264            outputs=outputs,
265            errors=errors,
266            runtime_s=runtime_s,
267            error_type="a_priori",
268            extra=extra,
269        )

Set-Membership Affine-Projection (SM-AP) adaptive filter (complex-valued).

Supervised affine-projection algorithm with set-membership updating, following Diniz (Alg. 6.2). Coefficients are updated only when the magnitude of the most-recent a priori error exceeds a prescribed bound gamma_bar. When an update occurs, the algorithm enforces a target a posteriori error vector (provided by gamma_bar_vector).

Parameters

filter_order : int Adaptive FIR filter order M. The number of coefficients is M + 1. gamma_bar : float Set-membership bound for the (most recent) a priori error magnitude. An update is performed only if |e[k]| > gamma_bar. gamma_bar_vector : array_like of complex Target a posteriori error vector with shape (L + 1,) (stored internally as a column vector). This is algorithm-dependent and corresponds to the desired post-update constraint in Alg. 6.2. gamma : float Regularization factor gamma used in the affine-projection normal equations to improve numerical stability. L : int Data reuse factor (projection order). The affine-projection block size is P = L + 1. w_init : array_like of complex, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

At iteration k, form the regressor block matrix:

  • X(k) ∈ C^{(M+1) x (L+1)}, whose columns are the most recent regressor vectors (newest in column 0).

The affine-projection output vector is:

$$y_{ap}(k) = X^H(k) w(k) \in \mathbb{C}^{L+1}.$$

Let the stacked desired vector be:

$$d_{ap}(k) \in \mathbb{C}^{L+1},$$

with newest sample at index 0. The a priori error vector is:

$$e_{ap}(k) = d_{ap}(k) - y_{ap}(k).$$

This implementation uses the most recent scalar component as the reported output and error:

$$y[k] = y_{ap}(k)[0], \qquad e[k] = e_{ap}(k)[0].$$

Set-membership update rule: update only if

$$|e[k]| > \bar{\gamma}.$$

When updating, solve the regularized system:

$$(X^H(k)X(k) + \gamma I_{L+1})\, s(k) =
\bigl(e_{ap}(k) - \bar{\gamma}_{vec}^*(k)\bigr),$$

and update the coefficients as:

$$w(k+1) = w(k) + X(k)\, s(k).$$

Here \bar{\gamma}_{vec} is provided by gamma_bar_vector (stored as a column vector); complex conjugation is applied to match the internal conjugate-domain formulation used in the implementation.

References


SMAffineProjection( filter_order: int, gamma_bar: float, gamma_bar_vector: Union[numpy.ndarray, list], gamma: float, L: int, w_init: Union[numpy.ndarray, list, NoneType] = None)
119    def __init__(
120        self,
121        filter_order: int,
122        gamma_bar: float,
123        gamma_bar_vector: Union[np.ndarray, list],
124        gamma: float,
125        L: int,
126        w_init: Optional[Union[np.ndarray, list]] = None,
127    ) -> None:
128        super().__init__(filter_order=filter_order, w_init=w_init)
129
130        self.gamma_bar = float(gamma_bar)
131        self.gamma = float(gamma)
132        self.L = int(L)
133
134        self.n_coeffs = int(self.filter_order + 1)
135
136        gvec = np.asarray(gamma_bar_vector, dtype=complex).ravel()
137        if gvec.size != (self.L + 1):
138            raise ValueError(
139                f"gamma_bar_vector must have size L+1 = {self.L + 1}, got {gvec.size}"
140            )
141        self.gamma_bar_vector = gvec.reshape(-1, 1)
142
143        self.regressor_matrix = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)
144
145        self.n_updates: int = 0
supports_complex: bool = True
gamma_bar: float
gamma_bar_vector: numpy.ndarray
gamma: float
L: int
n_coeffs: int
regressor_matrix
n_updates: int
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
147    @validate_input
148    def optimize(
149        self,
150        input_signal: np.ndarray,
151        desired_signal: np.ndarray,
152        verbose: bool = False,
153        return_internal_states: bool = False,
154    ) -> OptimizationResult:
155        """
156        Executes the SM-AP adaptation loop over paired input/desired sequences.
157
158        Parameters
159        ----------
160        input_signal : array_like of complex
161            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
162        desired_signal : array_like of complex
163            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
164        verbose : bool, optional
165            If True, prints total runtime and update count after completion.
166        return_internal_states : bool, optional
167            If True, includes the full a priori AP error-vector trajectory in
168            ``result.extra`` as ``"errors_vector"`` with shape ``(N, L + 1)``.
169
170        Returns
171        -------
172        OptimizationResult
173            Result object with fields:
174            - outputs : ndarray of complex, shape ``(N,)``
175                Scalar a priori output sequence, ``y[k] = y_{ap}(k)[0]``.
176            - errors : ndarray of complex, shape ``(N,)``
177                Scalar a priori error sequence, ``e[k] = e_{ap}(k)[0]``.
178            - coefficients : ndarray of complex
179                Coefficient history recorded by the base class.
180            - error_type : str
181                Set to ``"a_priori"``.
182            - extra : dict
183                Always present with:
184                - ``"n_updates"`` : int
185                    Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``).
186                - ``"update_mask"`` : ndarray of bool, shape ``(N,)``
187                    Boolean mask indicating which iterations performed updates.
188                Additionally present only if ``return_internal_states=True``:
189                - ``"errors_vector"`` : ndarray of complex, shape ``(N, L + 1)``
190                    Full affine-projection a priori error vectors over time.
191        """
192        tic: float = time()
193
194        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
195        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
196
197        n_samples: int = int(d.size)
198        n_coeffs: int = int(self.n_coeffs)
199        Lp1: int = int(self.L + 1)
200
201        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
202        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
203        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
204
205        errors_vec_track: Optional[np.ndarray] = (
206            np.zeros((n_samples, Lp1), dtype=complex) if return_internal_states else None
207        )
208
209        self.n_updates = 0
210        w_current: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1)
211
212        prefixed_input: np.ndarray = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x])
213        prefixed_desired: np.ndarray = np.concatenate([np.zeros(self.L, dtype=complex), d])
214
215        for k in range(n_samples):
216            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
217
218            start_idx = k + n_coeffs - 1
219            stop = (k - 1) if (k > 0) else None
220            self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1]
221
222            output_ap_conj = (self.regressor_matrix.conj().T) @ w_current
223
224            desired_slice = prefixed_desired[k + self.L : stop : -1]
225            error_ap_conj = desired_slice.conj().reshape(-1, 1) - output_ap_conj
226
227            yk = output_ap_conj[0, 0]
228            ek = error_ap_conj[0, 0]
229
230            outputs[k] = yk
231            errors[k] = ek
232            if return_internal_states and errors_vec_track is not None:
233                errors_vec_track[k, :] = error_ap_conj.ravel()
234
235            if np.abs(ek) > self.gamma_bar:
236                self.n_updates += 1
237                update_mask[k] = True
238
239                R = (self.regressor_matrix.conj().T @ self.regressor_matrix) + self.gamma * np.eye(Lp1)
240                b = error_ap_conj - self.gamma_bar_vector.conj()
241
242                try:
243                    step = np.linalg.solve(R, b)
244                except np.linalg.LinAlgError:
245                    step = np.linalg.pinv(R) @ b
246
247                w_current = w_current + (self.regressor_matrix @ step)
248
249            self.w = w_current.ravel()
250            self._record_history()
251
252        runtime_s: float = float(time() - tic)
253        if verbose:
254            print(f"[SM-AP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.02f} ms")
255
256        extra: Dict[str, Any] = {
257            "n_updates": int(self.n_updates),
258            "update_mask": update_mask,
259        }
260        if return_internal_states:
261            extra["errors_vector"] = errors_vec_track
262
263        return self._pack_results(
264            outputs=outputs,
265            errors=errors,
266            runtime_s=runtime_s,
267            error_type="a_priori",
268            extra=extra,
269        )

Executes the SM-AP adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of complex Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints total runtime and update count after completion. return_internal_states : bool, optional If True, includes the full a priori AP error-vector trajectory in result.extra as "errors_vector" with shape (N, L + 1).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Scalar a priori output sequence, y[k] = y_{ap}(k)[0]. - errors : ndarray of complex, shape (N,) Scalar a priori error sequence, e[k] = e_{ap}(k)[0]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict Always present with: - "n_updates" : int Number of coefficient updates (iterations where |e[k]| > gamma_bar). - "update_mask" : ndarray of bool, shape (N,) Boolean mask indicating which iterations performed updates. Additionally present only if return_internal_states=True: - "errors_vector" : ndarray of complex, shape (N, L + 1) Full affine-projection a priori error vectors over time.

class SimplifiedSMPUAP(pydaptivefiltering.AdaptiveFilter):
 27class SimplifiedSMPUAP(AdaptiveFilter):
 28    """
 29    Implements the Simplified Set-membership Partial-Update Affine-Projection (SM-Simp-PUAP)
 30    algorithm for complex-valued data. (Algorithm 6.6, Diniz)
 31
 32    Note
 33    ----
 34    The original implementation warns that this algorithm is under development and may be unstable
 35    for complex-valued simulations.
 36    """
 37    supports_complex: bool = True
 38
 39    gamma_bar: float
 40    gamma: float
 41    L: int
 42    n_coeffs: int
 43
 44    def __init__(
 45        self,
 46        filter_order: int,
 47        gamma_bar: float,
 48        gamma: float,
 49        L: int,
 50        up_selector: Union[np.ndarray, list],
 51        w_init: Optional[Union[np.ndarray, list]] = None,
 52    ) -> None:
 53        """
 54        Parameters
 55        ----------
 56        filter_order:
 57            FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1.
 58        gamma_bar:
 59            Error magnitude threshold for triggering updates.
 60        gamma:
 61            Regularization factor for the AP correlation matrix.
 62        L:
 63            Reuse data factor / constraint length (projection order).
 64        up_selector:
 65            Partial-update selector matrix with shape (M+1, N), entries in {0,1}.
 66            Each column selects which coefficients are updated at iteration k.
 67        w_init:
 68            Optional initial coefficient vector. If None, initializes to zeros.
 69        """
 70        warnings.warn(
 71            "SM-Simp-PUAP is currently under development and may not produce intended results. "
 72            "Instability or divergence (high MSE) has been observed in complex-valued simulations.",
 73            UserWarning,
 74        )
 75
 76        super().__init__(filter_order=filter_order, w_init=w_init)
 77
 78        self.gamma_bar = float(gamma_bar)
 79        self.gamma = float(gamma)
 80        self.L = int(L)
 81        self.n_coeffs = int(self.filter_order + 1)
 82
 83        sel = np.asarray(up_selector)
 84        if sel.ndim != 2:
 85            raise ValueError("up_selector must be a 2D array with shape (M+1, N).")
 86        if sel.shape[0] != self.n_coeffs:
 87            raise ValueError(
 88                f"up_selector must have shape (M+1, N) with M+1={self.n_coeffs}, got {sel.shape}."
 89            )
 90        self.up_selector: np.ndarray = sel
 91
 92        # Regressor matrix: columns are current/past regressors (x_k, x_{k-1}, ..., x_{k-L})
 93        self.regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)
 94
 95        # Backwards-compat alias
 96        self.X_matrix = self.regressor_matrix
 97
 98        # Bookkeeping
 99        self.n_updates: int = 0
100
101    @validate_input
102    def optimize(
103        self,
104        input_signal: np.ndarray,
105        desired_signal: np.ndarray,
106        verbose: bool = False,
107        return_internal_states: bool = False,
108    ) -> OptimizationResult:
109        """
110        Executes the SM-Simp-PUAP adaptation.
111
112        Parameters
113        ----------
114        input_signal:
115            Input signal x[k].
116        desired_signal:
117            Desired signal d[k].
118        verbose:
119            If True, prints runtime and update count.
120        return_internal_states:
121            If True, includes internal trajectories in result.extra.
122
123        Returns
124        -------
125        OptimizationResult
126            outputs:
127                A-priori output y[k] (first component of AP output vector).
128            errors:
129                A-priori error e[k] (first component of AP error vector).
130            coefficients:
131                History of coefficients stored in the base class.
132            error_type:
133                "a_priori".
134
135        Extra (always)
136        -------------
137        extra["n_updates"]:
138            Number of coefficient updates (iterations where |e(k)| > gamma_bar).
139        extra["update_mask"]:
140            Boolean array marking which iterations performed updates.
141
142        Extra (when return_internal_states=True)
143        --------------------------------------
144        extra["mu"]:
145            Trajectory of mu[k] (0 when no update).
146        extra["selected_count"]:
147            Number of selected coefficients each iteration.
148        """
149        tic: float = time()
150
151        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
152        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
153
154        n_samples: int = int(d.size)
155        n_coeffs: int = int(self.n_coeffs)
156        Lp1: int = int(self.L + 1)
157
158        # Validate selector length vs iterations
159        if self.up_selector.shape[1] < n_samples:
160            raise ValueError(
161                f"up_selector has {self.up_selector.shape[1]} columns, but signal has {n_samples} samples."
162            )
163
164        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
165        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
166
167        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
168
169        mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
170        selcnt_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=int) if return_internal_states else None
171
172        self.n_updates = 0
173        w_current: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1)
174
175        # Padding (matches original slicing/indexing)
176        prefixed_input: np.ndarray = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x])
177        prefixed_desired: np.ndarray = np.concatenate([np.zeros(self.L, dtype=complex), d])
178
179        # u1 = [1, 0, 0, ..., 0]^T  (selects first component)
180        u1: np.ndarray = np.zeros((Lp1, 1), dtype=complex)
181        u1[0, 0] = 1.0
182
183        for k in range(n_samples):
184            # Update regressor matrix
185            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
186            start_idx = k + n_coeffs - 1
187            stop = (k - 1) if (k > 0) else None
188            self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1]
189
190            # AP a-priori output/error vectors
191            output_ap_conj: np.ndarray = (self.regressor_matrix.conj().T) @ w_current  # (L+1,1)
192            desired_slice = prefixed_desired[k + self.L : stop : -1]
193            error_ap_conj: np.ndarray = desired_slice.conj().reshape(-1, 1) - output_ap_conj  # (L+1,1)
194
195            yk: complex = complex(output_ap_conj[0, 0])
196            ek: complex = complex(error_ap_conj[0, 0])
197
198            outputs[k] = yk
199            errors[k] = ek
200
201            eabs: float = float(np.abs(ek))
202            if eabs > self.gamma_bar:
203                self.n_updates += 1
204                update_mask[k] = True
205                mu: float = float(1.0 - (self.gamma_bar / eabs))
206            else:
207                mu = 0.0
208
209            # Partial-update selector for this iteration (column k): shape (M+1,1)
210            c_vec: np.ndarray = self.up_selector[:, k].reshape(-1, 1).astype(float)
211
212            if return_internal_states and selcnt_track is not None:
213                selcnt_track[k] = int(np.sum(c_vec != 0))
214
215            if mu > 0.0:
216                # Apply selection (element-wise) to regressor matrix
217                C_reg: np.ndarray = c_vec * self.regressor_matrix  # (M+1, L+1)
218
219                # R = X^H C X  (as in original: regressor_matrix^H @ C_reg)
220                R: np.ndarray = (self.regressor_matrix.conj().T @ C_reg) + self.gamma * np.eye(Lp1)
221
222                rhs: np.ndarray = mu * ek * u1  # (L+1,1)
223
224                try:
225                    inv_term = np.linalg.solve(R, rhs)
226                except np.linalg.LinAlgError:
227                    inv_term = np.linalg.pinv(R) @ rhs
228
229                w_current = w_current + (C_reg @ inv_term)
230
231            if return_internal_states and mu_track is not None:
232                mu_track[k] = mu
233
234            # Commit coefficients + history
235            self.w = w_current.ravel()
236            self._record_history()
237
238        runtime_s: float = float(time() - tic)
239        if verbose:
240            print(f"[SM-Simp-PUAP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.2f} ms")
241
242        extra: Dict[str, Any] = {
243            "n_updates": int(self.n_updates),
244            "update_mask": update_mask,
245        }
246        if return_internal_states:
247            extra.update(
248                {
249                    "mu": mu_track,
250                    "selected_count": selcnt_track,
251                }
252            )
253
254        return self._pack_results(
255            outputs=outputs,
256            errors=errors,
257            runtime_s=runtime_s,
258            error_type="a_priori",
259            extra=extra,
260        )

Implements the Simplified Set-membership Partial-Update Affine-Projection (SM-Simp-PUAP) algorithm for complex-valued data. (Algorithm 6.6, Diniz)

Note

The original implementation warns that this algorithm is under development and may be unstable for complex-valued simulations.

SimplifiedSMPUAP( filter_order: int, gamma_bar: float, gamma: float, L: int, up_selector: Union[numpy.ndarray, list], w_init: Union[numpy.ndarray, list, NoneType] = None)
44    def __init__(
45        self,
46        filter_order: int,
47        gamma_bar: float,
48        gamma: float,
49        L: int,
50        up_selector: Union[np.ndarray, list],
51        w_init: Optional[Union[np.ndarray, list]] = None,
52    ) -> None:
53        """
54        Parameters
55        ----------
56        filter_order:
57            FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1.
58        gamma_bar:
59            Error magnitude threshold for triggering updates.
60        gamma:
61            Regularization factor for the AP correlation matrix.
62        L:
63            Reuse data factor / constraint length (projection order).
64        up_selector:
65            Partial-update selector matrix with shape (M+1, N), entries in {0,1}.
66            Each column selects which coefficients are updated at iteration k.
67        w_init:
68            Optional initial coefficient vector. If None, initializes to zeros.
69        """
70        warnings.warn(
71            "SM-Simp-PUAP is currently under development and may not produce intended results. "
72            "Instability or divergence (high MSE) has been observed in complex-valued simulations.",
73            UserWarning,
74        )
75
76        super().__init__(filter_order=filter_order, w_init=w_init)
77
78        self.gamma_bar = float(gamma_bar)
79        self.gamma = float(gamma)
80        self.L = int(L)
81        self.n_coeffs = int(self.filter_order + 1)
82
83        sel = np.asarray(up_selector)
84        if sel.ndim != 2:
85            raise ValueError("up_selector must be a 2D array with shape (M+1, N).")
86        if sel.shape[0] != self.n_coeffs:
87            raise ValueError(
88                f"up_selector must have shape (M+1, N) with M+1={self.n_coeffs}, got {sel.shape}."
89            )
90        self.up_selector: np.ndarray = sel
91
92        # Regressor matrix: columns are current/past regressors (x_k, x_{k-1}, ..., x_{k-L})
93        self.regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)
94
95        # Backwards-compat alias
96        self.X_matrix = self.regressor_matrix
97
98        # Bookkeeping
99        self.n_updates: int = 0

Parameters

- filter_order — FIR filter order (number of taps - 1); the number of coefficients is filter_order + 1.
- gamma_bar — error magnitude threshold for triggering updates.
- gamma — regularization factor for the AP correlation matrix.
- L — reuse data factor / constraint length (projection order).
- up_selector — partial-update selector matrix with shape (M+1, N), entries in {0, 1}; each column selects which coefficients are updated at iteration k.
- w_init — optional initial coefficient vector; if None, initializes to zeros.

supports_complex: bool = True
gamma_bar: float
gamma: float
L: int
n_coeffs: int
up_selector: numpy.ndarray
regressor_matrix: numpy.ndarray
X_matrix
n_updates: int
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
101    @validate_input
102    def optimize(
103        self,
104        input_signal: np.ndarray,
105        desired_signal: np.ndarray,
106        verbose: bool = False,
107        return_internal_states: bool = False,
108    ) -> OptimizationResult:
109        """
110        Executes the SM-Simp-PUAP adaptation.
111
112        Parameters
113        ----------
114        input_signal:
115            Input signal x[k].
116        desired_signal:
117            Desired signal d[k].
118        verbose:
119            If True, prints runtime and update count.
120        return_internal_states:
121            If True, includes internal trajectories in result.extra.
122
123        Returns
124        -------
125        OptimizationResult
126            outputs:
127                A-priori output y[k] (first component of AP output vector).
128            errors:
129                A-priori error e[k] (first component of AP error vector).
130            coefficients:
131                History of coefficients stored in the base class.
132            error_type:
133                "a_priori".
134
135        Extra (always)
136        -------------
137        extra["n_updates"]:
138            Number of coefficient updates (iterations where |e(k)| > gamma_bar).
139        extra["update_mask"]:
140            Boolean array marking which iterations performed updates.
141
142        Extra (when return_internal_states=True)
143        --------------------------------------
144        extra["mu"]:
145            Trajectory of mu[k] (0 when no update).
146        extra["selected_count"]:
147            Number of selected coefficients each iteration.
148        """
149        tic: float = time()
150
151        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
152        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
153
154        n_samples: int = int(d.size)
155        n_coeffs: int = int(self.n_coeffs)
156        Lp1: int = int(self.L + 1)
157
158        # Validate selector length vs iterations
159        if self.up_selector.shape[1] < n_samples:
160            raise ValueError(
161                f"up_selector has {self.up_selector.shape[1]} columns, but signal has {n_samples} samples."
162            )
163
164        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
165        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
166
167        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
168
169        mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
170        selcnt_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=int) if return_internal_states else None
171
172        self.n_updates = 0
173        w_current: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1)
174
175        # Padding (matches original slicing/indexing)
176        prefixed_input: np.ndarray = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x])
177        prefixed_desired: np.ndarray = np.concatenate([np.zeros(self.L, dtype=complex), d])
178
179        # u1 = [1, 0, 0, ..., 0]^T  (selects first component)
180        u1: np.ndarray = np.zeros((Lp1, 1), dtype=complex)
181        u1[0, 0] = 1.0
182
183        for k in range(n_samples):
184            # Update regressor matrix
185            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
186            start_idx = k + n_coeffs - 1
187            stop = (k - 1) if (k > 0) else None
188            self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1]
189
190            # AP a-priori output/error vectors
191            output_ap_conj: np.ndarray = (self.regressor_matrix.conj().T) @ w_current  # (L+1,1)
192            desired_slice = prefixed_desired[k + self.L : stop : -1]
193            error_ap_conj: np.ndarray = desired_slice.conj().reshape(-1, 1) - output_ap_conj  # (L+1,1)
194
195            yk: complex = complex(output_ap_conj[0, 0])
196            ek: complex = complex(error_ap_conj[0, 0])
197
198            outputs[k] = yk
199            errors[k] = ek
200
201            eabs: float = float(np.abs(ek))
202            if eabs > self.gamma_bar:
203                self.n_updates += 1
204                update_mask[k] = True
205                mu: float = float(1.0 - (self.gamma_bar / eabs))
206            else:
207                mu = 0.0
208
209            # Partial-update selector for this iteration (column k): shape (M+1,1)
210            c_vec: np.ndarray = self.up_selector[:, k].reshape(-1, 1).astype(float)
211
212            if return_internal_states and selcnt_track is not None:
213                selcnt_track[k] = int(np.sum(c_vec != 0))
214
215            if mu > 0.0:
216                # Apply selection (element-wise) to regressor matrix
217                C_reg: np.ndarray = c_vec * self.regressor_matrix  # (M+1, L+1)
218
219                # R = X^H C X  (as in original: regressor_matrix^H @ C_reg)
220                R: np.ndarray = (self.regressor_matrix.conj().T @ C_reg) + self.gamma * np.eye(Lp1)
221
222                rhs: np.ndarray = mu * ek * u1  # (L+1,1)
223
224                try:
225                    inv_term = np.linalg.solve(R, rhs)
226                except np.linalg.LinAlgError:
227                    inv_term = np.linalg.pinv(R) @ rhs
228
229                w_current = w_current + (C_reg @ inv_term)
230
231            if return_internal_states and mu_track is not None:
232                mu_track[k] = mu
233
234            # Commit coefficients + history
235            self.w = w_current.ravel()
236            self._record_history()
237
238        runtime_s: float = float(time() - tic)
239        if verbose:
240            print(f"[SM-Simp-PUAP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.2f} ms")
241
242        extra: Dict[str, Any] = {
243            "n_updates": int(self.n_updates),
244            "update_mask": update_mask,
245        }
246        if return_internal_states:
247            extra.update(
248                {
249                    "mu": mu_track,
250                    "selected_count": selcnt_track,
251                }
252            )
253
254        return self._pack_results(
255            outputs=outputs,
256            errors=errors,
257            runtime_s=runtime_s,
258            error_type="a_priori",
259            extra=extra,
260        )

Executes the SM-Simp-PUAP adaptation.

Parameters

input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime and update count. return_internal_states: If True, includes internal trajectories in result.extra.

Returns

OptimizationResult outputs: A-priori output y[k] (first component of AP output vector). errors: A-priori error e[k] (first component of AP error vector). coefficients: History of coefficients stored in the base class. error_type: "a_priori".

Extra (always)

extra["n_updates"]: Number of coefficient updates (iterations where |e(k)| > gamma_bar). extra["update_mask"]: Boolean array marking which iterations performed updates.

Extra (when return_internal_states=True)

extra["mu"]: Trajectory of mu[k] (0 when no update). extra["selected_count"]: Number of selected coefficients each iteration.

class SimplifiedSMAP(pydaptivefiltering.AdaptiveFilter):
 26class SimplifiedSMAP(AdaptiveFilter):
 27    """
 28    Simplified Set-Membership Affine Projection (SM-Simp-AP) adaptive filter
 29    (complex-valued).
 30
 31    Implements Algorithm 6.3 (Diniz). This is a simplified affine-projection
 32    set-membership scheme where an AP-style regressor matrix of length ``L+1``
 33    is maintained, but **the update uses only the most recent column** (the
 34    current regressor vector). Updates occur only when the a priori error
 35    magnitude exceeds ``gamma_bar``.
 36
 37    Parameters
 38    ----------
 39    filter_order : int
 40        FIR filter order ``M`` (number of coefficients is ``M + 1``).
 41    gamma_bar : float
 42        Set-membership bound ``\\bar{\\gamma}`` for the a priori error magnitude.
 43        An update occurs only if ``|e[k]| > gamma_bar``.
 44    gamma : float
 45        Regularization constant used in the normalization denominator
 46        ``gamma + ||x_k||^2``.
 47    L : int
 48        Reuse data factor / constraint length. In this simplified variant it
 49        mainly determines the number of columns kept in the internal AP-style
 50        regressor matrix (size ``(M+1) x (L+1)``); only the first column is used
 51        in the update.
 52    w_init : array_like of complex, optional
 53        Initial coefficient vector ``w(0)``, shape ``(M + 1,)``. If None, zeros.
 54
 55    Notes
 56    -----
 57    Regressor definition
 58        The current tapped-delay regressor is
 59
 60        .. math::
 61            x_k = [x[k], x[k-1], \\dots, x[k-M]]^T \\in \\mathbb{C}^{M+1}.
 62
 63        Internally, the algorithm maintains an AP regressor matrix
 64
 65        .. math::
 66            X_k = [x_k, x_{k-1}, \\dots, x_{k-L}] \\in \\mathbb{C}^{(M+1)\\times(L+1)},
 67
 68        but the update uses only the first column ``x_k``.
 69
 70    A priori output and error (as implemented)
 71        This implementation computes
 72
 73        .. math::
 74            y[k] = x_k^H w[k],
 75
 76        and stores it as ``outputs[k]``.
 77        The stored error is
 78
 79        .. math::
 80            e[k] = d^*[k] - y[k].
 81
 82        (This matches the semantics of your code; many texts use
 83        ``e[k] = d[k] - w^H x_k``. If you want the textbook convention, you’d
 84        remove the conjugation on ``d[k]`` and ensure ``y[k]=w^H x_k``.)
 85
 86    Set-membership condition
 87        If ``|e[k]| \\le \\bar{\\gamma}``, no update is performed.
 88
 89        If ``|e[k]| > \\bar{\\gamma}``, define the scalar step factor
 90
 91        .. math::
 92            s[k] = \\left(1 - \\frac{\\bar{\\gamma}}{|e[k]|}\\right) e[k].
 93
 94    Normalized update (simplified AP)
 95        With ``\\mathrm{den}[k] = \\gamma + \\|x_k\\|^2``, the coefficient update is
 96
 97        .. math::
 98            w[k+1] = w[k] + \\frac{s[k]}{\\mathrm{den}[k]} \\, x_k.
 99
100    Returned error type
101        The returned sequences correspond to **a priori** quantities (computed
102        before updating ``w``), so ``error_type="a_priori"``.
103
104    References
105    ----------
106    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
107       Implementation*, Algorithm 6.3.
108    """
109    supports_complex: bool = True
110    gamma_bar: float
111    gamma: float
112    L: int
113    n_coeffs: int
114
115    def __init__(
116        self,
117        filter_order: int,
118        gamma_bar: float,
119        gamma: float,
120        L: int,
121        w_init: Optional[Union[np.ndarray, list]] = None,
122    ) -> None:
123        super().__init__(filter_order=filter_order, w_init=w_init)
124
125        self.gamma_bar = float(gamma_bar)
126        self.gamma = float(gamma)
127        self.L = int(L)
128        self.n_coeffs = int(self.filter_order + 1)
129
130        self.regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)
131
132        self.X_matrix = self.regressor_matrix
133
134        self.n_updates: int = 0
135
136    @validate_input
137    def optimize(
138        self,
139        input_signal: np.ndarray,
140        desired_signal: np.ndarray,
141        verbose: bool = False,
142        return_internal_states: bool = False,
143    ) -> OptimizationResult:
144        """
145        Executes the SM-Simp-AP adaptation.
146
147        Parameters
148        ----------
149        input_signal : array_like of complex
150            Input sequence ``x[k]``, shape ``(N,)`` (flattened internally).
151        desired_signal : array_like of complex
152            Desired sequence ``d[k]``, shape ``(N,)`` (flattened internally).
153        verbose : bool, optional
154            If True, prints runtime and update statistics after completion.
155        return_internal_states : bool, optional
156            If True, includes internal trajectories in ``result.extra``:
157            ``step_factor`` and ``den`` (each length ``N``). Entries are zero
158            when no update occurs.
159
160        Returns
161        -------
162        OptimizationResult
163            Result object with fields:
164            - outputs : ndarray of complex, shape ``(N,)``
165                A priori output sequence.
166            - errors : ndarray of complex, shape ``(N,)``
167                A priori error sequence (as in code: ``e[k] = conj(d[k]) - y[k]``).
168            - coefficients : ndarray of complex
169                Coefficient history recorded by the base class.
170            - error_type : str
171                Set to ``"a_priori"``.
172            - extra : dict
173                Always present with:
174                - ``"n_updates"`` : int
175                    Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``).
176                - ``"update_mask"`` : ndarray of bool, shape ``(N,)``
177                    Boolean mask indicating which iterations performed updates.
178                Additionally present only if ``return_internal_states=True``:
179                - ``"step_factor"`` : ndarray of complex, shape ``(N,)``
180                    Scalar factor ``(1 - gamma_bar/|e|) * e`` (0 when no update).
181                - ``"den"`` : ndarray of float, shape ``(N,)``
182                    Denominator ``gamma + ||x_k||^2`` (0 when no update).
183        """
184        tic: float = time()
185
186        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
187        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
188
189        n_samples: int = int(d.size)
190        n_coeffs: int = int(self.n_coeffs)
191
192        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
193        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
194
195        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
196
197        step_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
198        den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
199
200        self.n_updates = 0
201        w_current: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1)
202
203        prefixed_input: np.ndarray = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x])
204
205        for k in range(n_samples):
206            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
207
208            start_idx = k + n_coeffs - 1
209            stop = (k - 1) if (k > 0) else None
210            self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1]
211
212            xk: np.ndarray = self.regressor_matrix[:, 0:1]
213
214            output_k: complex = complex((xk.conj().T @ w_current).item())
215            error_k: complex = complex(np.conj(d[k]) - output_k)
216
217            outputs[k] = output_k
218            errors[k] = error_k
219
220            eabs: float = float(np.abs(error_k))
221
222            if eabs > self.gamma_bar:
223                self.n_updates += 1
224                update_mask[k] = True
225
226                step_factor: complex = complex((1.0 - (self.gamma_bar / eabs)) * error_k)
227
228                norm_sq: float = float(np.real((xk.conj().T @ xk).item()))
229                den: float = float(self.gamma + norm_sq)
230                if den <= 0.0:
231                    den = float(self.gamma + 1e-30)
232
233                w_current = w_current + (step_factor / den) * xk
234
235                if return_internal_states:
236                    if step_track is not None:
237                        step_track[k] = step_factor
238                    if den_track is not None:
239                        den_track[k] = den
240            else:
241                if return_internal_states:
242                    if step_track is not None:
243                        step_track[k] = 0.0 + 0.0j
244                    if den_track is not None:
245                        den_track[k] = 0.0
246
247            self.w = w_current.ravel()
248            self._record_history()
249
250        runtime_s: float = float(time() - tic)
251        if verbose:
252            print(f"[SM-Simp-AP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.2f} ms")
253
254        extra: Dict[str, Any] = {
255            "n_updates": int(self.n_updates),
256            "update_mask": update_mask,
257        }
258        if return_internal_states:
259            extra.update(
260                {
261                    "step_factor": step_track,
262                    "den": den_track,
263                }
264            )
265
266        return self._pack_results(
267            outputs=outputs,
268            errors=errors,
269            runtime_s=runtime_s,
270            error_type="a_priori",
271            extra=extra,
272        )

Simplified Set-Membership Affine Projection (SM-Simp-AP) adaptive filter (complex-valued).

Implements Algorithm 6.3 (Diniz). This is a simplified affine-projection set-membership scheme where an AP-style regressor matrix of length L+1 is maintained, but the update uses only the most recent column (the current regressor vector). Updates occur only when the a priori error magnitude exceeds gamma_bar.

Parameters

- filter_order : int — FIR filter order M (number of coefficients is M + 1).
- gamma_bar : float — set-membership bound $\bar{\gamma}$ for the a priori error magnitude; an update occurs only if $|e[k]| > \bar{\gamma}$.
- gamma : float — regularization constant used in the normalization denominator $\gamma + \|x_k\|^2$.
- L : int — reuse data factor / constraint length. In this simplified variant it mainly determines the number of columns kept in the internal AP-style regressor matrix (size $(M+1) \times (L+1)$); only the first column is used in the update.
- w_init : array_like of complex, optional — initial coefficient vector $w(0)$, shape (M + 1,); if None, zeros.

Notes

Regressor definition The current tapped-delay regressor is

$$x_k = [x[k], x[k-1], \dots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$

Internally, the algorithm maintains an AP regressor matrix

$$X_k = [x_k, x_{k-1}, \dots, x_{k-L}] \in \mathbb{C}^{(M+1)\times(L+1)},$$

but the update uses only the first column ``x_k``.

A priori output and error (as implemented) This implementation computes

$$y[k] = x_k^H w[k],$$

and stores it as ``outputs[k]``.
The stored error is

$$e[k] = d^*[k] - y[k].$$

(This matches the implementation's convention; many texts use
``e[k] = d[k] - w^H x_k``. To follow the textbook convention instead,
remove the conjugation on ``d[k]`` and ensure ``y[k] = w^H x_k``.)

Set-membership condition: if $|e[k]| \le \bar{\gamma}$, no update is performed.

If ``|e[k]| > \bar{\gamma}``, define the scalar step factor

$$s[k] = \left(1 - \frac{\bar{\gamma}}{|e[k]|}\right) e[k].$$

Normalized update (simplified AP): with $\mathrm{den}[k] = \gamma + \|x_k\|^2$, the coefficient update is

$$w[k+1] = w[k] + \frac{s[k]}{\mathrm{den}[k]} \, x_k.$$

Returned error type The returned sequences correspond to a priori quantities (computed before updating w), so error_type="a_priori".

References

[1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, Algorithm 6.3.
SimplifiedSMAP( filter_order: int, gamma_bar: float, gamma: float, L: int, w_init: Union[numpy.ndarray, list, NoneType] = None)
115    def __init__(
116        self,
117        filter_order: int,
118        gamma_bar: float,
119        gamma: float,
120        L: int,
121        w_init: Optional[Union[np.ndarray, list]] = None,
122    ) -> None:
123        super().__init__(filter_order=filter_order, w_init=w_init)
124
125        self.gamma_bar = float(gamma_bar)
126        self.gamma = float(gamma)
127        self.L = int(L)
128        self.n_coeffs = int(self.filter_order + 1)
129
130        self.regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)
131
132        self.X_matrix = self.regressor_matrix
133
134        self.n_updates: int = 0
supports_complex: bool = True
gamma_bar: float
gamma: float
L: int
n_coeffs: int
regressor_matrix: numpy.ndarray
X_matrix
n_updates: int
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
136    @validate_input
137    def optimize(
138        self,
139        input_signal: np.ndarray,
140        desired_signal: np.ndarray,
141        verbose: bool = False,
142        return_internal_states: bool = False,
143    ) -> OptimizationResult:
144        """
145        Executes the SM-Simp-AP adaptation.
146
147        Parameters
148        ----------
149        input_signal : array_like of complex
150            Input sequence ``x[k]``, shape ``(N,)`` (flattened internally).
151        desired_signal : array_like of complex
152            Desired sequence ``d[k]``, shape ``(N,)`` (flattened internally).
153        verbose : bool, optional
154            If True, prints runtime and update statistics after completion.
155        return_internal_states : bool, optional
156            If True, includes internal trajectories in ``result.extra``:
157            ``step_factor`` and ``den`` (each length ``N``). Entries are zero
158            when no update occurs.
159
160        Returns
161        -------
162        OptimizationResult
163            Result object with fields:
164            - outputs : ndarray of complex, shape ``(N,)``
165                A priori output sequence.
166            - errors : ndarray of complex, shape ``(N,)``
167                A priori error sequence (as in code: ``e[k] = conj(d[k]) - y[k]``).
168            - coefficients : ndarray of complex
169                Coefficient history recorded by the base class.
170            - error_type : str
171                Set to ``"a_priori"``.
172            - extra : dict
173                Always present with:
174                - ``"n_updates"`` : int
175                    Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``).
176                - ``"update_mask"`` : ndarray of bool, shape ``(N,)``
177                    Boolean mask indicating which iterations performed updates.
178                Additionally present only if ``return_internal_states=True``:
179                - ``"step_factor"`` : ndarray of complex, shape ``(N,)``
180                    Scalar factor ``(1 - gamma_bar/|e|) * e`` (0 when no update).
181                - ``"den"`` : ndarray of float, shape ``(N,)``
182                    Denominator ``gamma + ||x_k||^2`` (0 when no update).
183        """
184        tic: float = time()
185
186        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
187        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()
188
189        n_samples: int = int(d.size)
190        n_coeffs: int = int(self.n_coeffs)
191
192        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
193        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
194
195        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)
196
197        step_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
198        den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
199
200        self.n_updates = 0
201        w_current: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1)
202
203        prefixed_input: np.ndarray = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x])
204
205        for k in range(n_samples):
206            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
207
208            start_idx = k + n_coeffs - 1
209            stop = (k - 1) if (k > 0) else None
210            self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1]
211
212            xk: np.ndarray = self.regressor_matrix[:, 0:1]
213
214            output_k: complex = complex((xk.conj().T @ w_current).item())
215            error_k: complex = complex(np.conj(d[k]) - output_k)
216
217            outputs[k] = output_k
218            errors[k] = error_k
219
220            eabs: float = float(np.abs(error_k))
221
222            if eabs > self.gamma_bar:
223                self.n_updates += 1
224                update_mask[k] = True
225
226                step_factor: complex = complex((1.0 - (self.gamma_bar / eabs)) * error_k)
227
228                norm_sq: float = float(np.real((xk.conj().T @ xk).item()))
229                den: float = float(self.gamma + norm_sq)
230                if den <= 0.0:
231                    den = float(self.gamma + 1e-30)
232
233                w_current = w_current + (step_factor / den) * xk
234
235                if return_internal_states:
236                    if step_track is not None:
237                        step_track[k] = step_factor
238                    if den_track is not None:
239                        den_track[k] = den
240            else:
241                if return_internal_states:
242                    if step_track is not None:
243                        step_track[k] = 0.0 + 0.0j
244                    if den_track is not None:
245                        den_track[k] = 0.0
246
247            self.w = w_current.ravel()
248            self._record_history()
249
250        runtime_s: float = float(time() - tic)
251        if verbose:
252            print(f"[SM-Simp-AP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.2f} ms")
253
254        extra: Dict[str, Any] = {
255            "n_updates": int(self.n_updates),
256            "update_mask": update_mask,
257        }
258        if return_internal_states:
259            extra.update(
260                {
261                    "step_factor": step_track,
262                    "den": den_track,
263                }
264            )
265
266        return self._pack_results(
267            outputs=outputs,
268            errors=errors,
269            runtime_s=runtime_s,
270            error_type="a_priori",
271            extra=extra,
272        )

Executes the SM-Simp-AP adaptation.

Parameters

input_signal : array_like of complex Input sequence x[k], shape (N,) (flattened internally). desired_signal : array_like of complex Desired sequence d[k], shape (N,) (flattened internally). verbose : bool, optional If True, prints runtime and update statistics after completion. return_internal_states : bool, optional If True, includes internal trajectories in result.extra: step_factor and den (each length N). Entries are zero when no update occurs.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) A priori output sequence. - errors : ndarray of complex, shape (N,) A priori error sequence (as in code: e[k] = conj(d[k]) - y[k]). - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict Always present with: - "n_updates" : int Number of coefficient updates (iterations where |e[k]| > gamma_bar). - "update_mask" : ndarray of bool, shape (N,) Boolean mask indicating which iterations performed updates. Additionally present only if return_internal_states=True: - "step_factor" : ndarray of complex, shape (N,) Scalar factor (1 - gamma_bar/|e|) * e (0 when no update). - "den" : ndarray of float, shape (N,) Denominator gamma + ||x_k||^2 (0 when no update).

class LRLSPosteriori(pydaptivefiltering.AdaptiveFilter):
 25class LRLSPosteriori(AdaptiveFilter):
 26    """
 27    Lattice RLS using a posteriori errors (LRLS, a posteriori form), complex-valued.
 28
 29    Implements Diniz (Algorithm 7.1) in a lattice/ladder structure:
 30
 31    1) **Lattice prediction stage** (order ``M``):
 32       Updates forward/backward a posteriori prediction errors and energy terms
 33       using exponentially weighted recursions.
 34
 35    2) **Ladder (joint-process) stage** (length ``M+1``):
 36       Updates the ladder coefficients ``v`` and produces the **a posteriori**
 37       output error by progressively "whitening" the desired sample through the
 38       backward-error vector.
 39
 40    Library conventions
 41    -------------------
 42    - Complex-valued implementation (``supports_complex=True``).
 43    - Ladder coefficients are stored in ``self.v`` with length ``M+1``.
 44    - For compatibility with :class:`~pydaptivefiltering.base.AdaptiveFilter`,
 45      ``self.w`` mirrors ``self.v`` at each iteration and the base-class history
 46      corresponds to the ladder coefficient trajectory.
 47
 48    Parameters
 49    ----------
 50    filter_order : int
 51        Lattice order ``M`` (number of sections). The ladder has ``M+1`` coefficients.
 52    lambda_factor : float, optional
 53        Forgetting factor ``lambda`` used in the exponentially weighted recursions.
 54        Default is 0.99.
 55    epsilon : float, optional
 56        Initialization/regularization constant for the energy variables
 57        (forward/backward). Default is 0.1.
 58    w_init : array_like of complex, optional
 59        Optional initial ladder coefficients of length ``M+1``. If None, initializes
 60        with zeros.
 61    denom_floor : float, optional
 62        Small positive floor used to avoid division by (near) zero in normalization
 63        terms (``gamma`` variables and energy denominators). Default is 1e-12.
 64    xi_floor : float, optional
 65        Floor applied to energy variables to keep them positive. If None, defaults
 66        to ``epsilon``.
 67
 68    Notes
 69    -----
 70    Signals and dimensions
 71    ~~~~~~~~~~~~~~~~~~~~~~
 72    For lattice order ``M``:
 73
 74    - ``delta`` has shape ``(M,)`` (lattice delta state)
 75    - ``xi_f`` and ``xi_b`` have shape ``(M+1,)`` (forward/backward energies)
 76    - ``error_b_prev`` and the per-sample ``curr_err_b`` have shape ``(M+1,)``
 77      (backward-error vectors)
 78    - ``v`` and ``delta_v`` have shape ``(M+1,)`` (ladder state and coefficients)
 79
 80    A posteriori error (as returned)
 81    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 82    The ladder stage starts with ``e_post = d[k]`` and updates it as:
 83
 84    .. math::
 85        e_{post}(k) \\leftarrow e_{post}(k) - v_m^*(k)\\, b_m(k),
 86
 87    where :math:`b_m(k)` are the components of the backward-error vector.
 88    The final ``e_post`` is the **a posteriori error** returned in ``errors[k]``,
 89    while the output estimate is returned as ``outputs[k] = d[k] - e_post``.
 90
 91    References
 92    ----------
 93    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 94       Implementation*, Algorithm 7.1.
 95    """
 96
 97    supports_complex: bool = True
 98
 99    def __init__(
100        self,
101        filter_order: int,
102        lambda_factor: float = 0.99,
103        epsilon: float = 0.1,
104        w_init: Optional[Union[np.ndarray, list]] = None,
105        denom_floor: float = 1e-12,
106        xi_floor: Optional[float] = None,
107    ) -> None:
108        """
109        Parameters
110        ----------
111        filter_order:
112            Number of lattice sections M. Ladder has M+1 coefficients.
113        lambda_factor:
114            Forgetting factor λ.
115        epsilon:
116            Energy initialization / regularization.
117        w_init:
118            Optional initial ladder coefficient vector (length M+1). If None, zeros.
119        denom_floor:
120            Floor used to avoid division by (near) zero in normalization terms.
121        xi_floor:
122            Floor used to keep energies positive (defaults to epsilon).
123        """
124        super().__init__(filter_order=filter_order, w_init=w_init)
125
126        self.lam = float(lambda_factor)
127        self.epsilon = float(epsilon)
128        self.n_sections = int(filter_order)
129
130        self._tiny = float(denom_floor)
131        self._xi_floor = float(xi_floor) if xi_floor is not None else float(self.epsilon)
132
133        self.delta = np.zeros(self.n_sections, dtype=complex)
134        self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
135        self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
136        self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex)
137
138        if w_init is not None:
139            v0 = np.asarray(w_init, dtype=complex).reshape(-1)
140            if v0.size != self.n_sections + 1:
141                raise ValueError(
142                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
143                )
144            self.v = v0
145        else:
146            self.v = np.zeros(self.n_sections + 1, dtype=complex)
147
148        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)
149
150        self.w = self.v.copy()
151        self.w_history = []
152        self._record_history()
153
154    @validate_input
155    def optimize(
156        self,
157        input_signal: np.ndarray,
158        desired_signal: np.ndarray,
159        verbose: bool = False,
160        return_internal_states: bool = False,
161    ) -> OptimizationResult:
162        """
163        Executes LRLS adaptation (a posteriori form) over paired sequences ``x[k]`` and ``d[k]``.
164
165        Parameters
166        ----------
167        input_signal : array_like of complex
168            Input sequence ``x[k]`` with shape ``(N,)``.
169        desired_signal : array_like of complex
170            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
171        verbose : bool, optional
172            If True, prints the total runtime after completion.
173        return_internal_states : bool, optional
174            If True, returns selected *final* internal states in ``result.extra``
175            (not full trajectories).
176
177        Returns
178        -------
179        OptimizationResult
180            Result object with fields:
181            - outputs : ndarray of complex, shape ``(N,)``
182                Estimated output sequence. In this implementation:
183                ``outputs[k] = d[k] - e_post[k]``.
184            - errors : ndarray of complex, shape ``(N,)``
185                A posteriori error produced by the ladder stage (final ``e_post``).
186            - coefficients : ndarray
187                Ladder coefficient history (mirrors ``self.v`` via ``self.w``).
188            - error_type : str
189                Set to ``"a_posteriori"``.
190            - extra : dict, optional
191                Present only if ``return_internal_states=True`` (see below).
192
193        Extra (when return_internal_states=True)
194        --------------------------------------
195        xi_f : ndarray of float, shape ``(M+1,)``
196            Final forward energies.
197        xi_b : ndarray of float, shape ``(M+1,)``
198            Final backward energies.
199        delta : ndarray of complex, shape ``(M,)``
200            Final lattice delta state.
201        delta_v : ndarray of complex, shape ``(M+1,)``
202            Final ladder delta state used to compute ``v``.
203        """
204        t0 = perf_counter()
205
206        x_in = np.asarray(input_signal, dtype=complex).ravel()
207        d_in = np.asarray(desired_signal, dtype=complex).ravel()
208
209        n_samples = int(d_in.size)
210        outputs = np.zeros(n_samples, dtype=complex)
211        errors  = np.zeros(n_samples, dtype=complex)
212
213        for k in range(n_samples):
214            err_f = complex(x_in[k])
215
216            curr_err_b = np.zeros(self.n_sections + 1, dtype=complex)
217            curr_err_b[0] = x_in[k]
218
219            energy_x = float(np.real(err_f * np.conj(err_f)))
220            self.xi_f[0] = max(self.lam * self.xi_f[0] + energy_x, self._xi_floor)
221            self.xi_b[0] = self.xi_f[0]
222
223            gamma_m = 1.0
224
225            for m in range(self.n_sections):
226                denom_g = max(gamma_m, self._tiny)
227
228                self.delta[m] = (
229                    self.lam * self.delta[m]
230                    + (self.error_b_prev[m] * np.conj(err_f)) / denom_g
231                )
232
233                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
234                kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny)
235
236                new_err_f = err_f - kappa_f * self.error_b_prev[m]
237                curr_err_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f
238
239                self.xi_f[m + 1] = max(
240                    self.lam * self.xi_f[m + 1]
241                    + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g,
242                    self._xi_floor,
243                )
244                self.xi_b[m + 1] = max(
245                    self.lam * self.xi_b[m + 1]
246                    + float(np.real(curr_err_b[m + 1] * np.conj(curr_err_b[m + 1]))) / denom_g,
247                    self._xi_floor,
248                )
249
250                denom_xib = self.xi_b[m] + self._tiny
251                energy_b_curr = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m])))
252                gamma_m_next = gamma_m - (energy_b_curr / denom_xib)
253
254                gamma_m = max(gamma_m_next, self._tiny)
255                err_f = new_err_f
256
257            e_post = complex(d_in[k])
258            gamma_ladder = 1.0
259
260            for m in range(self.n_sections + 1):
261                denom_gl = max(gamma_ladder, self._tiny)
262
263                self.delta_v[m] = (
264                    self.lam * self.delta_v[m]
265                    + (curr_err_b[m] * np.conj(e_post)) / denom_gl
266                )
267
268                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)
269
270                e_post = e_post - np.conj(self.v[m]) * curr_err_b[m]
271
272                denom_xib_m = self.xi_b[m] + self._tiny
273                energy_b_l = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m])))
274                gamma_ladder_next = gamma_ladder - (energy_b_l / denom_xib_m)
275                gamma_ladder = max(gamma_ladder_next, self._tiny)
276
277            outputs[k] = d_in[k] - e_post
278            errors[k] = e_post
279
280            self.error_b_prev = curr_err_b.copy()
281
282            self.w = self.v.copy()
283            self._record_history()
284
285        runtime_s = float(perf_counter() - t0)
286        if verbose:
287            print(f"[LRLSPosteriori] Completed in {runtime_s * 1000:.02f} ms")
288
289        extra: Optional[Dict[str, Any]] = None
290        if return_internal_states:
291            extra = {
292                "xi_f": self.xi_f.copy(),
293                "xi_b": self.xi_b.copy(),
294                "delta": self.delta.copy(),
295                "delta_v": self.delta_v.copy(),
296            }
297
298        return self._pack_results(
299            outputs=outputs,
300            errors=errors,
301            runtime_s=runtime_s,
302            error_type="a_posteriori",
303            extra=extra,
304        )

Lattice RLS using a posteriori errors (LRLS, a posteriori form), complex-valued.

Implements Diniz (Algorithm 7.1) in a lattice/ladder structure:

1) Lattice prediction stage (order M): Updates forward/backward a posteriori prediction errors and energy terms using exponentially weighted recursions.

2) Ladder (joint-process) stage (length M+1): Updates the ladder coefficients v and produces the a posteriori output error by progressively "whitening" the desired sample through the backward-error vector.

Library conventions

  • Complex-valued implementation (supports_complex=True).
  • Ladder coefficients are stored in self.v with length M+1.
  • For compatibility with ~pydaptivefiltering.base.AdaptiveFilter, self.w mirrors self.v at each iteration and the base-class history corresponds to the ladder coefficient trajectory.

Parameters

filter_order : int Lattice order M (number of sections). The ladder has M+1 coefficients. lambda_factor : float, optional Forgetting factor lambda used in the exponentially weighted recursions. Default is 0.99. epsilon : float, optional Initialization/regularization constant for the energy variables (forward/backward). Default is 0.1. w_init : array_like of complex, optional Optional initial ladder coefficients of length M+1. If None, initializes with zeros. denom_floor : float, optional Small positive floor used to avoid division by (near) zero in normalization terms (gamma variables and energy denominators). Default is 1e-12. xi_floor : float, optional Floor applied to energy variables to keep them positive. If None, defaults to epsilon.

Notes

Signals and dimensions: for lattice order M:

  • delta has shape (M,) (lattice delta state)
  • xi_f and xi_b have shape (M+1,) (forward/backward energies)
  • error_b_prev and the per-sample curr_err_b have shape (M+1,) (backward-error vectors)
  • v and delta_v have shape (M+1,) (ladder state and coefficients)

A posteriori error (as returned): the ladder stage starts with e_post = d[k] and updates it as:

$$e_{post}(k) \leftarrow e_{post}(k) - v_m^*(k)\, b_m(k),$$

where $b_m(k)$ are the components of the backward-error vector. The final e_post is the a posteriori error returned in errors[k], while the output estimate is returned as outputs[k] = d[k] - e_post.

References


LRLSPosteriori( filter_order: int, lambda_factor: float = 0.99, epsilon: float = 0.1, w_init: Union[numpy.ndarray, list, NoneType] = None, denom_floor: float = 1e-12, xi_floor: Optional[float] = None)
 99    def __init__(
100        self,
101        filter_order: int,
102        lambda_factor: float = 0.99,
103        epsilon: float = 0.1,
104        w_init: Optional[Union[np.ndarray, list]] = None,
105        denom_floor: float = 1e-12,
106        xi_floor: Optional[float] = None,
107    ) -> None:
108        """
109        Parameters
110        ----------
111        filter_order:
112            Number of lattice sections M. Ladder has M+1 coefficients.
113        lambda_factor:
114            Forgetting factor λ.
115        epsilon:
116            Energy initialization / regularization.
117        w_init:
118            Optional initial ladder coefficient vector (length M+1). If None, zeros.
119        denom_floor:
120            Floor used to avoid division by (near) zero in normalization terms.
121        xi_floor:
122            Floor used to keep energies positive (defaults to epsilon).
123        """
124        super().__init__(filter_order=filter_order, w_init=w_init)
125
126        self.lam = float(lambda_factor)
127        self.epsilon = float(epsilon)
128        self.n_sections = int(filter_order)
129
130        self._tiny = float(denom_floor)
131        self._xi_floor = float(xi_floor) if xi_floor is not None else float(self.epsilon)
132
133        self.delta = np.zeros(self.n_sections, dtype=complex)
134        self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
135        self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
136        self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex)
137
138        if w_init is not None:
139            v0 = np.asarray(w_init, dtype=complex).reshape(-1)
140            if v0.size != self.n_sections + 1:
141                raise ValueError(
142                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
143                )
144            self.v = v0
145        else:
146            self.v = np.zeros(self.n_sections + 1, dtype=complex)
147
148        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)
149
150        self.w = self.v.copy()
151        self.w_history = []
152        self._record_history()

Parameters

filter_order: Number of lattice sections M. Ladder has M+1 coefficients. lambda_factor: Forgetting factor λ. epsilon: Energy initialization / regularization. w_init: Optional initial ladder coefficient vector (length M+1). If None, zeros. denom_floor: Floor used to avoid division by (near) zero in normalization terms. xi_floor: Floor used to keep energies positive (defaults to epsilon).

supports_complex: bool = True
lam
epsilon
n_sections
delta
xi_f
xi_b
error_b_prev
delta_v
w
w_history
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
154    @validate_input
155    def optimize(
156        self,
157        input_signal: np.ndarray,
158        desired_signal: np.ndarray,
159        verbose: bool = False,
160        return_internal_states: bool = False,
161    ) -> OptimizationResult:
162        """
163        Executes LRLS adaptation (a posteriori form) over paired sequences ``x[k]`` and ``d[k]``.
164
165        Parameters
166        ----------
167        input_signal : array_like of complex
168            Input sequence ``x[k]`` with shape ``(N,)``.
169        desired_signal : array_like of complex
170            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
171        verbose : bool, optional
172            If True, prints the total runtime after completion.
173        return_internal_states : bool, optional
174            If True, returns selected *final* internal states in ``result.extra``
175            (not full trajectories).
176
177        Returns
178        -------
179        OptimizationResult
180            Result object with fields:
181            - outputs : ndarray of complex, shape ``(N,)``
182                Estimated output sequence. In this implementation:
183                ``outputs[k] = d[k] - e_post[k]``.
184            - errors : ndarray of complex, shape ``(N,)``
185                A posteriori error produced by the ladder stage (final ``e_post``).
186            - coefficients : ndarray
187                Ladder coefficient history (mirrors ``self.v`` via ``self.w``).
188            - error_type : str
189                Set to ``"a_posteriori"``.
190            - extra : dict, optional
191                Present only if ``return_internal_states=True`` (see below).
192
193        Extra (when return_internal_states=True)
194        --------------------------------------
195        xi_f : ndarray of float, shape ``(M+1,)``
196            Final forward energies.
197        xi_b : ndarray of float, shape ``(M+1,)``
198            Final backward energies.
199        delta : ndarray of complex, shape ``(M,)``
200            Final lattice delta state.
201        delta_v : ndarray of complex, shape ``(M+1,)``
202            Final ladder delta state used to compute ``v``.
203        """
204        t0 = perf_counter()
205
206        x_in = np.asarray(input_signal, dtype=complex).ravel()
207        d_in = np.asarray(desired_signal, dtype=complex).ravel()
208
209        n_samples = int(d_in.size)
210        outputs = np.zeros(n_samples, dtype=complex)
211        errors  = np.zeros(n_samples, dtype=complex)
212
213        for k in range(n_samples):
214            err_f = complex(x_in[k])
215
216            curr_err_b = np.zeros(self.n_sections + 1, dtype=complex)
217            curr_err_b[0] = x_in[k]
218
219            energy_x = float(np.real(err_f * np.conj(err_f)))
220            self.xi_f[0] = max(self.lam * self.xi_f[0] + energy_x, self._xi_floor)
221            self.xi_b[0] = self.xi_f[0]
222
223            gamma_m = 1.0
224
225            for m in range(self.n_sections):
226                denom_g = max(gamma_m, self._tiny)
227
228                self.delta[m] = (
229                    self.lam * self.delta[m]
230                    + (self.error_b_prev[m] * np.conj(err_f)) / denom_g
231                )
232
233                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
234                kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny)
235
236                new_err_f = err_f - kappa_f * self.error_b_prev[m]
237                curr_err_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f
238
239                self.xi_f[m + 1] = max(
240                    self.lam * self.xi_f[m + 1]
241                    + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g,
242                    self._xi_floor,
243                )
244                self.xi_b[m + 1] = max(
245                    self.lam * self.xi_b[m + 1]
246                    + float(np.real(curr_err_b[m + 1] * np.conj(curr_err_b[m + 1]))) / denom_g,
247                    self._xi_floor,
248                )
249
250                denom_xib = self.xi_b[m] + self._tiny
251                energy_b_curr = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m])))
252                gamma_m_next = gamma_m - (energy_b_curr / denom_xib)
253
254                gamma_m = max(gamma_m_next, self._tiny)
255                err_f = new_err_f
256
257            e_post = complex(d_in[k])
258            gamma_ladder = 1.0
259
260            for m in range(self.n_sections + 1):
261                denom_gl = max(gamma_ladder, self._tiny)
262
263                self.delta_v[m] = (
264                    self.lam * self.delta_v[m]
265                    + (curr_err_b[m] * np.conj(e_post)) / denom_gl
266                )
267
268                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)
269
270                e_post = e_post - np.conj(self.v[m]) * curr_err_b[m]
271
272                denom_xib_m = self.xi_b[m] + self._tiny
273                energy_b_l = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m])))
274                gamma_ladder_next = gamma_ladder - (energy_b_l / denom_xib_m)
275                gamma_ladder = max(gamma_ladder_next, self._tiny)
276
277            outputs[k] = d_in[k] - e_post
278            errors[k] = e_post
279
280            self.error_b_prev = curr_err_b.copy()
281
282            self.w = self.v.copy()
283            self._record_history()
284
285        runtime_s = float(perf_counter() - t0)
286        if verbose:
287            print(f"[LRLSPosteriori] Completed in {runtime_s * 1000:.02f} ms")
288
289        extra: Optional[Dict[str, Any]] = None
290        if return_internal_states:
291            extra = {
292                "xi_f": self.xi_f.copy(),
293                "xi_b": self.xi_b.copy(),
294                "delta": self.delta.copy(),
295                "delta_v": self.delta_v.copy(),
296            }
297
298        return self._pack_results(
299            outputs=outputs,
300            errors=errors,
301            runtime_s=runtime_s,
302            error_type="a_posteriori",
303            extra=extra,
304        )

Executes LRLS adaptation (a posteriori form) over paired sequences x[k] and d[k].

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,). desired_signal : array_like of complex Desired/reference sequence d[k] with shape (N,). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, returns selected final internal states in result.extra (not full trajectories).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Estimated output sequence. In this implementation: outputs[k] = d[k] - e_post[k]. - errors : ndarray of complex, shape (N,) A posteriori error produced by the ladder stage (final e_post). - coefficients : ndarray Ladder coefficient history (mirrors self.v via self.w). - error_type : str Set to "a_posteriori". - extra : dict, optional Present only if return_internal_states=True (see below).

Extra (when return_internal_states=True)

xi_f : ndarray of float, shape (M+1,) Final forward energies. xi_b : ndarray of float, shape (M+1,) Final backward energies. delta : ndarray of complex, shape (M,) Final lattice delta state. delta_v : ndarray of complex, shape (M+1,) Final ladder delta state used to compute v.

class LRLSErrorFeedback(pydaptivefiltering.AdaptiveFilter):
 28class LRLSErrorFeedback(AdaptiveFilter):
 29    """
 30    Lattice RLS with a posteriori errors and Error Feedback (LRLS-EF), complex-valued.
 31
 32    Implements the lattice/ladder RLS structure with error feedback described in
 33    Diniz (Algorithm 7.5). The method decomposes the adaptation into:
 34
 35    1) **Lattice prediction stage**:
 36       Updates forward/backward a posteriori prediction errors and associated
 37       reflection-like variables via exponentially weighted energies.
 38
 39    2) **Ladder (joint-process) stage**:
 40       Estimates the ladder coefficients that map the lattice backward-error
 41       vector into the desired response.
 42
 43    In this implementation, the ladder coefficient vector is stored in ``self.v``
 44    (length ``M+1``). For compatibility with :class:`~pydaptivefiltering.base.AdaptiveFilter`,
 45    ``self.w`` mirrors ``self.v`` at each iteration and the coefficient history
 46    recorded by the base class corresponds to the ladder coefficients.
 47
 48    Parameters
 49    ----------
 50    filter_order : int
 51        Lattice order ``M`` (number of sections). The ladder has ``M+1`` coefficients.
 52    lambda_factor : float, optional
 53        Forgetting factor ``lambda`` used in the exponentially weighted recursions.
 54        Default is 0.99.
 55    epsilon : float, optional
 56        Positive initialization/regularization constant for forward and backward
 57        energies. Default is 0.1.
 58    w_init : array_like of complex, optional
 59        Optional initial ladder coefficients of length ``M+1``. If None, initializes
 60        with zeros.
 61    safe_eps : float, optional
 62        Small positive floor used to avoid division by (near) zero and to keep the
 63        internal likelihood variables bounded. Default is 1e-12.
 64
 65    Notes
 66    -----
 67    Signals and dimensions
 68    ~~~~~~~~~~~~~~~~~~~~~~
 69    This class operates on complex-valued sequences. For lattice order ``M``:
 70
 71    - ``delta`` and ``delta_v`` have shape ``(M+1,)``
 72    - ``xi_f`` and ``xi_b`` have shape ``(M+2,)`` (energies per section plus guard)
 73    - ``error_b_prev`` has shape ``(M+2,)`` and stores the previous backward-error
 74      vector used for the error-feedback recursion.
 75    - At each time k, the ladder regressor is the backward-error vector
 76      ``curr_b[:M+1]``.
 77
 78    Output computation
 79    ~~~~~~~~~~~~~~~~~~
 80    The estimated output is formed as a ladder combination:
 81
 82    .. math::
 83        y(k) = \\mathbf{v}^H(k)\\, \\mathbf{b}(k),
 84
 85    where :math:`\\mathbf{b}(k)` corresponds to ``curr_b[:M+1]`` and
 86    :math:`\\mathbf{v}(k)` is the ladder coefficient vector ``self.v``.
 87    The reported error is the output error :math:`e(k)=d(k)-y(k)`.
 88
 89    References
 90    ----------
 91    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 92       Implementation*, Algorithm 7.5.
 93    """
 94
 95    supports_complex: bool = True
 96
 97    lam: float
 98    epsilon: float
 99    n_sections: int
100    safe_eps: float
101
102    delta: np.ndarray
103    xi_f: np.ndarray
104    xi_b: np.ndarray
105    error_b_prev: np.ndarray
106
107    v: np.ndarray
108    delta_v: np.ndarray
109
110    def __init__(
111        self,
112        filter_order: int,
113        lambda_factor: float = 0.99,
114        epsilon: float = 0.1,
115        w_init: Optional[Union[np.ndarray, list]] = None,
116        safe_eps: float = 1e-12,
117    ) -> None:
118        """
119        Parameters
120        ----------
121        filter_order:
122            Lattice order M (number of sections). Ladder has M+1 coefficients.
123        lambda_factor:
124            Forgetting factor λ.
125        epsilon:
126            Regularization/initialization constant for energies.
127        w_init:
128            Optional initial ladder coefficients (length M+1). If None, zeros.
129        safe_eps:
130            Small positive floor used to avoid division by (near) zero.
131        """
132        super().__init__(filter_order=filter_order, w_init=w_init)
133
134        self.lam = float(lambda_factor)
135        self.epsilon = float(epsilon)
136        self.n_sections = int(filter_order)
137        self.safe_eps = float(safe_eps)
138
139        self.delta = np.zeros(self.n_sections + 1, dtype=complex)
140
141        self.xi_f = np.ones(self.n_sections + 2, dtype=float) * self.epsilon
142        self.xi_b = np.ones(self.n_sections + 2, dtype=float) * self.epsilon
143
144        self.error_b_prev = np.zeros(self.n_sections + 2, dtype=complex)
145
146        if w_init is not None:
147            v0 = np.asarray(w_init, dtype=complex).ravel()
148            if v0.size != self.n_sections + 1:
149                raise ValueError(
150                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
151                )
152            self.v = v0
153        else:
154            self.v = np.zeros(self.n_sections + 1, dtype=complex)
155
156        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)
157
158        self.w = self.v.copy()
159        self.w_history = []
160        self._record_history()
161
162    @validate_input
163    def optimize(
164        self,
165        input_signal: np.ndarray,
166        desired_signal: np.ndarray,
167        verbose: bool = False,
168        return_internal_states: bool = False,
169    ) -> OptimizationResult:
170        """
171        Executes LRLS-EF adaptation for paired sequences ``x[k]`` and ``d[k]``.
172
173        Parameters
174        ----------
175        input_signal : array_like of complex
176            Input sequence ``x[k]`` with shape ``(N,)``.
177        desired_signal : array_like of complex
178            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
179        verbose : bool, optional
180            If True, prints the total runtime after completion.
181        return_internal_states : bool, optional
182            If True, returns selected *final* internal states in ``result.extra``
183            (not full trajectories).
184
185        Returns
186        -------
187        OptimizationResult
188            Result object with fields:
189            - outputs : ndarray of complex, shape ``(N,)``
190                Estimated output sequence ``y[k]``.
191            - errors : ndarray of complex, shape ``(N,)``
192                Output error sequence ``e[k] = d[k] - y[k]``.
193            - coefficients : ndarray
194                Ladder coefficient history (mirrors ``self.v`` via ``self.w``).
195            - error_type : str
196                Set to ``"output_error"``.
197            - extra : dict, optional
198                Present only if ``return_internal_states=True`` (see below).
199
200        Extra (when return_internal_states=True)
201        --------------------------------------
202        xi_f : ndarray of float, shape ``(M+2,)``
203            Final forward prediction-error energies.
204        xi_b : ndarray of float, shape ``(M+2,)``
205            Final backward prediction-error energies.
206        delta : ndarray of complex, shape ``(M+1,)``
207            Final lattice delta (reflection-like) state.
208        delta_v : ndarray of complex, shape ``(M+1,)``
209            Final ladder delta state used to compute ``v``.
210        """
211        tic: float = time()
212
213        x_in = np.asarray(input_signal, dtype=complex).ravel()
214        d_in = np.asarray(desired_signal, dtype=complex).ravel()
215
216        n_samples = int(d_in.size)
217        outputs = np.zeros(n_samples, dtype=complex)
218        errors = np.zeros(n_samples, dtype=complex)
219
220        eps = self.safe_eps
221
222        for k in range(n_samples):
223            err_f = complex(x_in[k])
224
225            curr_b = np.zeros(self.n_sections + 2, dtype=complex)
226            curr_b[0] = x_in[k]
227
228            energy_x = float(np.real(x_in[k] * np.conj(x_in[k])))
229            self.xi_f[0] = self.lam * self.xi_f[0] + energy_x
230            self.xi_b[0] = self.xi_f[0]
231
232            g = 1.0
233
234            for m in range(self.n_sections + 1):
235                denom_g = max(g, eps)
236
237                self.delta[m] = (
238                    self.lam * self.delta[m]
239                    + (self.error_b_prev[m] * np.conj(err_f)) / denom_g
240                )
241
242                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + eps)
243                kappa_b = self.delta[m] / (self.xi_f[m] + eps)
244
245                new_err_f = err_f - kappa_f * self.error_b_prev[m]
246                curr_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f
247
248                self.xi_f[m + 1] = (
249                    self.lam * self.xi_f[m + 1]
250                    + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g
251                )
252                self.xi_b[m + 1] = (
253                    self.lam * self.xi_b[m + 1]
254                    + float(np.real(curr_b[m + 1] * np.conj(curr_b[m + 1]))) / denom_g
255                )
256
257                energy_b_curr = float(np.real(curr_b[m] * np.conj(curr_b[m])))
258                g = g - (energy_b_curr / (self.xi_b[m] + eps))
259                g = max(g, eps)
260
261                err_f = new_err_f
262
263            y_k = complex(np.vdot(self.v, curr_b[: self.n_sections + 1]))
264            outputs[k] = y_k
265            e_k = complex(d_in[k] - y_k)
266            errors[k] = e_k
267
268            g_ladder = 1.0
269            for m in range(self.n_sections + 1):
270                denom_gl = max(g_ladder, eps)
271
272                self.delta_v[m] = (
273                    self.lam * self.delta_v[m]
274                    + (curr_b[m] * np.conj(d_in[k])) / denom_gl
275                )
276
277                self.v[m] = self.delta_v[m] / (self.xi_b[m] + eps)
278
279                energy_b = float(np.real(curr_b[m] * np.conj(curr_b[m])))
280                g_ladder = g_ladder - (energy_b / (self.xi_b[m] + eps))
281                g_ladder = max(g_ladder, eps)
282
283            self.error_b_prev = curr_b
284
285            self.w = self.v.copy()
286            self._record_history()
287
288        runtime_s = float(time() - tic)
289        if verbose:
290            print(f"[LRLSErrorFeedback] Completed in {runtime_s * 1000:.02f} ms")
291
292        extra: Optional[Dict[str, Any]] = None
293        if return_internal_states:
294            extra = {
295                "xi_f": self.xi_f.copy(),
296                "xi_b": self.xi_b.copy(),
297                "delta": self.delta.copy(),
298                "delta_v": self.delta_v.copy(),
299            }
300
301        return self._pack_results(
302            outputs=outputs,
303            errors=errors,
304            runtime_s=runtime_s,
305            error_type="output_error",
306            extra=extra,
307        )

Lattice RLS with a posteriori errors and Error Feedback (LRLS-EF), complex-valued.

Implements the lattice/ladder RLS structure with error feedback described in Diniz (Algorithm 7.5). The method decomposes the adaptation into:

1) Lattice prediction stage: Updates forward/backward a posteriori prediction errors and associated reflection-like variables via exponentially weighted energies.

2) Ladder (joint-process) stage: Estimates the ladder coefficients that map the lattice backward-error vector into the desired response.

In this implementation, the ladder coefficient vector is stored in self.v (length M+1). For compatibility with pydaptivefiltering.base.AdaptiveFilter, self.w mirrors self.v at each iteration, and the coefficient history recorded by the base class corresponds to the ladder coefficients.

Parameters

filter_order : int — Lattice order M (number of sections); the ladder has M+1 coefficients.
lambda_factor : float, optional — Forgetting factor lambda used in the exponentially weighted recursions. Default is 0.99.
epsilon : float, optional — Positive initialization/regularization constant for the forward and backward energies. Default is 0.1.
w_init : array_like of complex, optional — Initial ladder coefficients of length M+1. If None, initializes with zeros.
safe_eps : float, optional — Small positive floor used to avoid division by (near) zero and to keep the internal likelihood variables bounded. Default is 1e-12.

Notes

Signals and dimensions: This class operates on complex-valued sequences. For lattice order M:

  • delta and delta_v have shape (M+1,)
  • xi_f and xi_b have shape (M+2,) (energies per section plus guard)
  • error_b_prev has shape (M+2,) and stores the previous backward-error vector used for the error-feedback recursion.
  • At each time k, the ladder regressor is the backward-error vector curr_b[:M+1].

Output computation: The estimated output is formed as a ladder combination:

$$y(k) = \mathbf{v}^H(k)\, \mathbf{b}(k),$$

where \( \mathbf{b}(k) \) corresponds to curr_b[:M+1] and \( \mathbf{v}(k) \) is the ladder coefficient vector self.v. The reported error is the output error \( e(k)=d(k)-y(k) \).

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, Algorithm 7.5.

LRLSErrorFeedback( filter_order: int, lambda_factor: float = 0.99, epsilon: float = 0.1, w_init: Union[numpy.ndarray, list, NoneType] = None, safe_eps: float = 1e-12)
110    def __init__(
111        self,
112        filter_order: int,
113        lambda_factor: float = 0.99,
114        epsilon: float = 0.1,
115        w_init: Optional[Union[np.ndarray, list]] = None,
116        safe_eps: float = 1e-12,
117    ) -> None:
118        """
119        Parameters
120        ----------
121        filter_order:
122            Lattice order M (number of sections). Ladder has M+1 coefficients.
123        lambda_factor:
124            Forgetting factor λ.
125        epsilon:
126            Regularization/initialization constant for energies.
127        w_init:
128            Optional initial ladder coefficients (length M+1). If None, zeros.
129        safe_eps:
130            Small positive floor used to avoid division by (near) zero.
131        """
132        super().__init__(filter_order=filter_order, w_init=w_init)
133
134        self.lam = float(lambda_factor)
135        self.epsilon = float(epsilon)
136        self.n_sections = int(filter_order)
137        self.safe_eps = float(safe_eps)
138
139        self.delta = np.zeros(self.n_sections + 1, dtype=complex)
140
141        self.xi_f = np.ones(self.n_sections + 2, dtype=float) * self.epsilon
142        self.xi_b = np.ones(self.n_sections + 2, dtype=float) * self.epsilon
143
144        self.error_b_prev = np.zeros(self.n_sections + 2, dtype=complex)
145
146        if w_init is not None:
147            v0 = np.asarray(w_init, dtype=complex).ravel()
148            if v0.size != self.n_sections + 1:
149                raise ValueError(
150                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
151                )
152            self.v = v0
153        else:
154            self.v = np.zeros(self.n_sections + 1, dtype=complex)
155
156        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)
157
158        self.w = self.v.copy()
159        self.w_history = []
160        self._record_history()

Parameters

filter_order — Lattice order M (number of sections); the ladder has M+1 coefficients.
lambda_factor — Forgetting factor λ.
epsilon — Regularization/initialization constant for energies.
w_init — Optional initial ladder coefficients (length M+1). If None, zeros.
safe_eps — Small positive floor used to avoid division by (near) zero.

supports_complex: bool = True
lam: float
epsilon: float
n_sections: int
safe_eps: float
delta: numpy.ndarray
xi_f: numpy.ndarray
xi_b: numpy.ndarray
error_b_prev: numpy.ndarray
v: numpy.ndarray
delta_v: numpy.ndarray
w
w_history
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
162    @validate_input
163    def optimize(
164        self,
165        input_signal: np.ndarray,
166        desired_signal: np.ndarray,
167        verbose: bool = False,
168        return_internal_states: bool = False,
169    ) -> OptimizationResult:
170        """
171        Executes LRLS-EF adaptation for paired sequences ``x[k]`` and ``d[k]``.
172
173        Parameters
174        ----------
175        input_signal : array_like of complex
176            Input sequence ``x[k]`` with shape ``(N,)``.
177        desired_signal : array_like of complex
178            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
179        verbose : bool, optional
180            If True, prints the total runtime after completion.
181        return_internal_states : bool, optional
182            If True, returns selected *final* internal states in ``result.extra``
183            (not full trajectories).
184
185        Returns
186        -------
187        OptimizationResult
188            Result object with fields:
189            - outputs : ndarray of complex, shape ``(N,)``
190                Estimated output sequence ``y[k]``.
191            - errors : ndarray of complex, shape ``(N,)``
192                Output error sequence ``e[k] = d[k] - y[k]``.
193            - coefficients : ndarray
194                Ladder coefficient history (mirrors ``self.v`` via ``self.w``).
195            - error_type : str
196                Set to ``"output_error"``.
197            - extra : dict, optional
198                Present only if ``return_internal_states=True`` (see below).
199
200        Extra (when return_internal_states=True)
201        --------------------------------------
202        xi_f : ndarray of float, shape ``(M+2,)``
203            Final forward prediction-error energies.
204        xi_b : ndarray of float, shape ``(M+2,)``
205            Final backward prediction-error energies.
206        delta : ndarray of complex, shape ``(M+1,)``
207            Final lattice delta (reflection-like) state.
208        delta_v : ndarray of complex, shape ``(M+1,)``
209            Final ladder delta state used to compute ``v``.
210        """
211        tic: float = time()
212
213        x_in = np.asarray(input_signal, dtype=complex).ravel()
214        d_in = np.asarray(desired_signal, dtype=complex).ravel()
215
216        n_samples = int(d_in.size)
217        outputs = np.zeros(n_samples, dtype=complex)
218        errors = np.zeros(n_samples, dtype=complex)
219
220        eps = self.safe_eps
221
222        for k in range(n_samples):
223            err_f = complex(x_in[k])
224
225            curr_b = np.zeros(self.n_sections + 2, dtype=complex)
226            curr_b[0] = x_in[k]
227
228            energy_x = float(np.real(x_in[k] * np.conj(x_in[k])))
229            self.xi_f[0] = self.lam * self.xi_f[0] + energy_x
230            self.xi_b[0] = self.xi_f[0]
231
232            g = 1.0
233
234            for m in range(self.n_sections + 1):
235                denom_g = max(g, eps)
236
237                self.delta[m] = (
238                    self.lam * self.delta[m]
239                    + (self.error_b_prev[m] * np.conj(err_f)) / denom_g
240                )
241
242                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + eps)
243                kappa_b = self.delta[m] / (self.xi_f[m] + eps)
244
245                new_err_f = err_f - kappa_f * self.error_b_prev[m]
246                curr_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f
247
248                self.xi_f[m + 1] = (
249                    self.lam * self.xi_f[m + 1]
250                    + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g
251                )
252                self.xi_b[m + 1] = (
253                    self.lam * self.xi_b[m + 1]
254                    + float(np.real(curr_b[m + 1] * np.conj(curr_b[m + 1]))) / denom_g
255                )
256
257                energy_b_curr = float(np.real(curr_b[m] * np.conj(curr_b[m])))
258                g = g - (energy_b_curr / (self.xi_b[m] + eps))
259                g = max(g, eps)
260
261                err_f = new_err_f
262
263            y_k = complex(np.vdot(self.v, curr_b[: self.n_sections + 1]))
264            outputs[k] = y_k
265            e_k = complex(d_in[k] - y_k)
266            errors[k] = e_k
267
268            g_ladder = 1.0
269            for m in range(self.n_sections + 1):
270                denom_gl = max(g_ladder, eps)
271
272                self.delta_v[m] = (
273                    self.lam * self.delta_v[m]
274                    + (curr_b[m] * np.conj(d_in[k])) / denom_gl
275                )
276
277                self.v[m] = self.delta_v[m] / (self.xi_b[m] + eps)
278
279                energy_b = float(np.real(curr_b[m] * np.conj(curr_b[m])))
280                g_ladder = g_ladder - (energy_b / (self.xi_b[m] + eps))
281                g_ladder = max(g_ladder, eps)
282
283            self.error_b_prev = curr_b
284
285            self.w = self.v.copy()
286            self._record_history()
287
288        runtime_s = float(time() - tic)
289        if verbose:
290            print(f"[LRLSErrorFeedback] Completed in {runtime_s * 1000:.02f} ms")
291
292        extra: Optional[Dict[str, Any]] = None
293        if return_internal_states:
294            extra = {
295                "xi_f": self.xi_f.copy(),
296                "xi_b": self.xi_b.copy(),
297                "delta": self.delta.copy(),
298                "delta_v": self.delta_v.copy(),
299            }
300
301        return self._pack_results(
302            outputs=outputs,
303            errors=errors,
304            runtime_s=runtime_s,
305            error_type="output_error",
306            extra=extra,
307        )

Executes LRLS-EF adaptation for paired sequences x[k] and d[k].

Parameters

input_signal : array_like of complex — Input sequence x[k] with shape (N,).
desired_signal : array_like of complex — Desired/reference sequence d[k] with shape (N,).
verbose : bool, optional — If True, prints the total runtime after completion.
return_internal_states : bool, optional — If True, returns selected final internal states in result.extra (not full trajectories).

Returns

OptimizationResult — Result object with fields:

  • outputs : ndarray of complex, shape (N,) — Estimated output sequence y[k].
  • errors : ndarray of complex, shape (N,) — Output error sequence e[k] = d[k] - y[k].
  • coefficients : ndarray — Ladder coefficient history (mirrors self.v via self.w).
  • error_type : str — Set to "output_error".
  • extra : dict, optional — Present only if return_internal_states=True (see below).

Extra (when return_internal_states=True)

xi_f : ndarray of float, shape (M+2,) Final forward prediction-error energies. xi_b : ndarray of float, shape (M+2,) Final backward prediction-error energies. delta : ndarray of complex, shape (M+1,) Final lattice delta (reflection-like) state. delta_v : ndarray of complex, shape (M+1,) Final ladder delta state used to compute v.

class LRLSPriori(pydaptivefiltering.AdaptiveFilter):
 25class LRLSPriori(AdaptiveFilter):
 26    """
 27    Lattice RLS using a priori errors (LRLS, a priori form), complex-valued.
 28
 29    Implements Diniz (Algorithm 7.4) in a lattice/ladder structure:
 30
 31    1) **Lattice prediction stage** (order ``M``):
 32       Produces forward a priori errors and a vector of backward errors, updating
 33       reflection-like state variables and exponentially weighted energies.
 34
 35    2) **Ladder (joint-process) stage** (length ``M+1``):
 36       Updates the ladder coefficients ``v`` using the a priori backward-error
 37       vector and produces an **a priori** error associated with the desired signal.
 38
 39    Library conventions
 40    -------------------
 41    - Complex-valued implementation (``supports_complex=True``).
 42    - Ladder coefficients are stored in ``self.v`` with length ``M+1``.
 43    - For compatibility with :class:`~pydaptivefiltering.base.AdaptiveFilter`,
 44      ``self.w`` mirrors ``self.v`` at each iteration and the base-class history
 45      corresponds to the ladder coefficient trajectory.
 46
 47    Parameters
 48    ----------
 49    filter_order : int
 50        Lattice order ``M`` (number of sections). The ladder has ``M+1`` coefficients.
 51    lambda_factor : float, optional
 52        Forgetting factor ``lambda`` used in the exponentially weighted recursions.
 53        Default is 0.99.
 54    epsilon : float, optional
 55        Initialization/regularization constant for the energy variables
 56        (forward/backward). Default is 0.1.
 57    w_init : array_like of complex, optional
 58        Optional initial ladder coefficients of length ``M+1``. If None, initializes
 59        with zeros.
 60    denom_floor : float, optional
 61        Small positive floor used to avoid division by (near) zero in normalization
 62        terms (``gamma`` variables and energy denominators). Default is 1e-12.
 63
 64    Notes
 65    -----
 66    Signals and dimensions
 67    ~~~~~~~~~~~~~~~~~~~~~~
 68    For lattice order ``M``:
 69
 70    - ``delta`` has shape ``(M,)`` (lattice delta state)
 71    - ``xi_f`` and ``xi_b`` have shape ``(M+1,)`` (forward/backward energies)
 72    - ``error_b_prev`` and per-sample ``alpha_b`` have shape ``(M+1,)``
 73      (backward-error vectors)
 74    - ``v`` and ``delta_v`` have shape ``(M+1,)`` (ladder coefficients and state)
 75
 76    A priori error (as returned)
 77    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 78    The ladder stage starts with ``alpha_e = d[k]`` and removes components
 79    correlated with the backward-error vector:
 80
 81    .. math::
 82        \\alpha_e \\leftarrow \\alpha_e - v_m^*(k)\\, b_m(k),
 83
 84    where :math:`b_m(k)` are the backward errors (``alpha_b[m]``). The final
 85    value is then scaled by the final lattice normalization factor ``gamma``:
 86
 87    .. math::
 88        e_{pri}(k) = \\gamma(k)\\, \\alpha_e(k).
 89
 90    This scaled error is returned in ``errors[k]``, and the output estimate is
 91    returned as ``outputs[k] = d[k] - e_pri[k]``.
 92
 93    References
 94    ----------
 95    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 96       Implementation*, Algorithm 7.4.
 97    """
 98
 99    supports_complex: bool = True
100
101    def __init__(
102        self,
103        filter_order: int,
104        lambda_factor: float = 0.99,
105        epsilon: float = 0.1,
106        w_init: Optional[Union[np.ndarray, list]] = None,
107        denom_floor: float = 1e-12,
108    ) -> None:
109        """
110        Parameters
111        ----------
112        filter_order:
113            Number of lattice sections M. Ladder has M+1 coefficients.
114        lambda_factor:
115            Forgetting factor λ.
116        epsilon:
117            Energy initialization / regularization.
118        w_init:
119            Optional initial ladder coefficient vector (length M+1). If None, zeros.
120        denom_floor:
121            Floor used to avoid division by (near) zero in normalization terms.
122        """
123        super().__init__(filter_order=filter_order, w_init=w_init)
124
125        self.lam = float(lambda_factor)
126        self.epsilon = float(epsilon)
127        self.n_sections = int(filter_order)
128        self._tiny = float(denom_floor)
129
130        self.delta = np.zeros(self.n_sections, dtype=complex)
131        self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
132        self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
133        self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex)
134
135        if w_init is not None:
136            v0 = np.asarray(w_init, dtype=complex).reshape(-1)
137            if v0.size != self.n_sections + 1:
138                raise ValueError(
139                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
140                )
141            self.v = v0
142        else:
143            self.v = np.zeros(self.n_sections + 1, dtype=complex)
144
145        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)
146
147        # Mirror to base API
148        self.w = self.v.copy()
149        self.w_history = []
150        self._record_history()
151
152    @validate_input
153    def optimize(
154        self,
155        input_signal: np.ndarray,
156        desired_signal: np.ndarray,
157        verbose: bool = False,
158        return_internal_states: bool = False,
159    ) -> OptimizationResult:
160        """
161        Executes LRLS adaptation (a priori form) over paired sequences ``x[k]`` and ``d[k]``.
162
163        Parameters
164        ----------
165        input_signal : array_like of complex
166            Input sequence ``x[k]`` with shape ``(N,)``.
167        desired_signal : array_like of complex
168            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
169        verbose : bool, optional
170            If True, prints the total runtime after completion.
171        return_internal_states : bool, optional
172            If True, returns selected *final* internal states in ``result.extra``
173            (not full trajectories).
174
175        Returns
176        -------
177        OptimizationResult
178            Result object with fields:
179            - outputs : ndarray of complex, shape ``(N,)``
180                Estimated output sequence. In this implementation:
181                ``outputs[k] = d[k] - e_pri[k]``.
182            - errors : ndarray of complex, shape ``(N,)``
183                A priori ladder error scaled by the final lattice normalization
184                factor: ``e_pri[k] = gamma[k] * alpha_e[k]``.
185            - coefficients : ndarray
186                Ladder coefficient history (mirrors ``self.v`` via ``self.w``).
187            - error_type : str
188                Set to ``"a_priori"``.
189            - extra : dict, optional
190                Present only if ``return_internal_states=True`` (see below).
191
192        Extra (when return_internal_states=True)
193        --------------------------------------
194        xi_f : ndarray of float, shape ``(M+1,)``
195            Final forward energies.
196        xi_b : ndarray of float, shape ``(M+1,)``
197            Final backward energies.
198        delta : ndarray of complex, shape ``(M,)``
199            Final lattice delta state.
200        delta_v : ndarray of complex, shape ``(M+1,)``
201            Final ladder delta state used to compute ``v``.
202        """
203        t0 = perf_counter()
204
205        # validate_input already normalizes to 1D and matches lengths.
206        # Force complex to respect supports_complex=True (even if x/d are real).
207        x_in = np.asarray(input_signal, dtype=complex).ravel()
208        d_in = np.asarray(desired_signal, dtype=complex).ravel()
209
210        n_samples = int(d_in.size)
211        outputs = np.zeros(n_samples, dtype=complex)
212        errors = np.zeros(n_samples, dtype=complex)
213
214        for k in range(n_samples):
215            alpha_f = complex(x_in[k])
216
217            alpha_b = np.zeros(self.n_sections + 1, dtype=complex)
218            alpha_b[0] = x_in[k]
219
220            gamma = 1.0
221            gamma_orders = np.ones(self.n_sections + 1, dtype=float)
222
223            # -------------------------
224            # Lattice stage (a priori)
225            # -------------------------
226            for m in range(self.n_sections):
227                gamma_orders[m] = gamma
228                denom_g = max(gamma, self._tiny)
229
230                self.delta[m] = (
231                    self.lam * self.delta[m]
232                    + (self.error_b_prev[m] * np.conj(alpha_f)) / denom_g
233                )
234
235                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
236                kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny)
237
238                alpha_f_next = alpha_f - kappa_f * self.error_b_prev[m]
239                alpha_b[m + 1] = self.error_b_prev[m] - kappa_b * alpha_f
240
241                # Energy updates (kept as in your code, with safe denominators)
242                self.xi_f[m] = (
243                    self.lam * self.xi_f[m]
244                    + float(np.real(alpha_f * np.conj(alpha_f))) / denom_g
245                )
246                self.xi_b[m] = (
247                    self.lam * self.xi_b[m]
248                    + float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_g
249                )
250
251                denom_xib = self.xi_b[m] + self._tiny
252                gamma_next = gamma - (
253                    float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_xib
254                )
255                gamma = max(gamma_next, self._tiny)
256                alpha_f = alpha_f_next
257
258            gamma_orders[self.n_sections] = gamma
259            self.xi_f[self.n_sections] = (
260                self.lam * self.xi_f[self.n_sections]
261                + float(np.real(alpha_f * np.conj(alpha_f))) / max(gamma, self._tiny)
262            )
263            self.xi_b[self.n_sections] = (
264                self.lam * self.xi_b[self.n_sections]
265                + float(np.real(alpha_b[self.n_sections] * np.conj(alpha_b[self.n_sections])))
266                / max(gamma, self._tiny)
267            )
268
269            # -------------------------
270            # Ladder stage (a priori)
271            # -------------------------
272            alpha_e = complex(d_in[k])
273
274            for m in range(self.n_sections + 1):
275                denom_go = max(gamma_orders[m], self._tiny)
276
277                self.delta_v[m] = (
278                    self.lam * self.delta_v[m]
279                    + (alpha_b[m] * np.conj(alpha_e)) / denom_go
280                )
281
282                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)
283                alpha_e = alpha_e - np.conj(self.v[m]) * alpha_b[m]
284
285            e_k = alpha_e * gamma
286            errors[k] = e_k
287            outputs[k] = d_in[k] - e_k
288
289            self.error_b_prev = alpha_b.copy()
290
291            # Mirror ladder coeffs into base API + record history
292            self.w = self.v.copy()
293            self._record_history()
294
295        runtime_s = float(perf_counter() - t0)
296        if verbose:
297            print(f"[LRLSPriori] Completed in {runtime_s * 1000:.02f} ms")
298
299        extra: Optional[Dict[str, Any]] = None
300        if return_internal_states:
301            extra = {
302                "xi_f": self.xi_f.copy(),
303                "xi_b": self.xi_b.copy(),
304                "delta": self.delta.copy(),
305                "delta_v": self.delta_v.copy(),
306            }
307
308        return self._pack_results(
309            outputs=outputs,
310            errors=errors,
311            runtime_s=runtime_s,
312            error_type="a_priori",
313            extra=extra,
314        )

Lattice RLS using a priori errors (LRLS, a priori form), complex-valued.

Implements Diniz (Algorithm 7.4) in a lattice/ladder structure:

1) Lattice prediction stage (order M): Produces forward a priori errors and a vector of backward errors, updating reflection-like state variables and exponentially weighted energies.

2) Ladder (joint-process) stage (length M+1): Updates the ladder coefficients v using the a priori backward-error vector and produces an a priori error associated with the desired signal.

Library conventions

  • Complex-valued implementation (supports_complex=True).
  • Ladder coefficients are stored in self.v with length M+1.
  • For compatibility with ~pydaptivefiltering.base.AdaptiveFilter, self.w mirrors self.v at each iteration and the base-class history corresponds to the ladder coefficient trajectory.

Parameters

filter_order : int Lattice order M (number of sections). The ladder has M+1 coefficients. lambda_factor : float, optional Forgetting factor lambda used in the exponentially weighted recursions. Default is 0.99. epsilon : float, optional Initialization/regularization constant for the energy variables (forward/backward). Default is 0.1. w_init : array_like of complex, optional Optional initial ladder coefficients of length M+1. If None, initializes with zeros. denom_floor : float, optional Small positive floor used to avoid division by (near) zero in normalization terms (gamma variables and energy denominators). Default is 1e-12.

Notes

Signals and dimensions — for lattice order M:

  • delta has shape (M,) (lattice delta state)
  • xi_f and xi_b have shape (M+1,) (forward/backward energies)
  • error_b_prev and per-sample alpha_b have shape (M+1,) (backward-error vectors)
  • v and delta_v have shape (M+1,) (ladder coefficients and state)

A priori error (as returned) — the ladder stage starts with alpha_e = d[k] and removes components correlated with the backward-error vector:

$$\alpha_e \leftarrow \alpha_e - v_m^*(k)\, b_m(k),$$

where \( b_m(k) \) are the backward errors (alpha_b[m]). The final value is then scaled by the final lattice normalization factor gamma:

$$e_{pri}(k) = \gamma(k)\, \alpha_e(k).$$

This scaled error is returned in errors[k], and the output estimate is returned as outputs[k] = d[k] - e_pri[k].

References


LRLSPriori( filter_order: int, lambda_factor: float = 0.99, epsilon: float = 0.1, w_init: Union[numpy.ndarray, list, NoneType] = None, denom_floor: float = 1e-12)
101    def __init__(
102        self,
103        filter_order: int,
104        lambda_factor: float = 0.99,
105        epsilon: float = 0.1,
106        w_init: Optional[Union[np.ndarray, list]] = None,
107        denom_floor: float = 1e-12,
108    ) -> None:
109        """
110        Parameters
111        ----------
112        filter_order:
113            Number of lattice sections M. Ladder has M+1 coefficients.
114        lambda_factor:
115            Forgetting factor λ.
116        epsilon:
117            Energy initialization / regularization.
118        w_init:
119            Optional initial ladder coefficient vector (length M+1). If None, zeros.
120        denom_floor:
121            Floor used to avoid division by (near) zero in normalization terms.
122        """
123        super().__init__(filter_order=filter_order, w_init=w_init)
124
125        self.lam = float(lambda_factor)
126        self.epsilon = float(epsilon)
127        self.n_sections = int(filter_order)
128        self._tiny = float(denom_floor)
129
130        self.delta = np.zeros(self.n_sections, dtype=complex)
131        self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
132        self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
133        self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex)
134
135        if w_init is not None:
136            v0 = np.asarray(w_init, dtype=complex).reshape(-1)
137            if v0.size != self.n_sections + 1:
138                raise ValueError(
139                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
140                )
141            self.v = v0
142        else:
143            self.v = np.zeros(self.n_sections + 1, dtype=complex)
144
145        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)
146
147        # Mirror to base API
148        self.w = self.v.copy()
149        self.w_history = []
150        self._record_history()

Parameters

filter_order: Number of lattice sections M. Ladder has M+1 coefficients. lambda_factor: Forgetting factor λ. epsilon: Energy initialization / regularization. w_init: Optional initial ladder coefficient vector (length M+1). If None, zeros. denom_floor: Floor used to avoid division by (near) zero in normalization terms.

supports_complex: bool = True
lam
epsilon
n_sections
delta
xi_f
xi_b
error_b_prev
delta_v
w
w_history
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes LRLS adaptation (a priori form) over paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, returns selected *final* internal states in ``result.extra``
            (not full trajectories).

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of complex, shape ``(N,)``
                Estimated output sequence. In this implementation:
                ``outputs[k] = d[k] - e_pri[k]``.
            - errors : ndarray of complex, shape ``(N,)``
                A priori ladder error scaled by the final lattice normalization
                factor: ``e_pri[k] = gamma[k] * alpha_e[k]``.
            - coefficients : ndarray
                Ladder coefficient history (mirrors ``self.v`` via ``self.w``).
            - error_type : str
                Set to ``"a_priori"``.
            - extra : dict, optional
                Present only if ``return_internal_states=True`` (see below).

        Extra (when return_internal_states=True)
        --------------------------------------
        xi_f : ndarray of float, shape ``(M+1,)``
            Final forward energies.
        xi_b : ndarray of float, shape ``(M+1,)``
            Final backward energies.
        delta : ndarray of complex, shape ``(M,)``
            Final lattice delta state.
        delta_v : ndarray of complex, shape ``(M+1,)``
            Final ladder delta state used to compute ``v``.
        """
        t0 = perf_counter()

        # validate_input already normalizes to 1D and matches lengths.
        # Force complex to respect supports_complex=True (even if x/d are real).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        for k in range(n_samples):
            # Order-0 forward and backward a priori errors are the raw input sample.
            alpha_f = complex(x_in[k])

            alpha_b = np.zeros(self.n_sections + 1, dtype=complex)
            alpha_b[0] = x_in[k]

            # gamma is the lattice conversion factor, reset to 1 every sample;
            # gamma_orders keeps its per-order values for the ladder stage below.
            gamma = 1.0
            gamma_orders = np.ones(self.n_sections + 1, dtype=float)

            # -------------------------
            # Lattice stage (a priori)
            # -------------------------
            for m in range(self.n_sections):
                gamma_orders[m] = gamma
                # Floor the conversion factor before dividing by it.
                denom_g = max(gamma, self._tiny)

                # Exponentially weighted cross-correlation between the previous
                # backward error and the current forward error.
                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(alpha_f)) / denom_g
                )

                # Reflection-like gains, regularized by the energy floors.
                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
                kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny)

                # Order-update of the forward/backward a priori errors.
                # error_b_prev[m] is the backward error from the previous sample.
                alpha_f_next = alpha_f - kappa_f * self.error_b_prev[m]
                alpha_b[m + 1] = self.error_b_prev[m] - kappa_b * alpha_f

                # Exponentially weighted forward/backward energy updates
                # (gamma-normalized, with a safe floored denominator).
                self.xi_f[m] = (
                    self.lam * self.xi_f[m]
                    + float(np.real(alpha_f * np.conj(alpha_f))) / denom_g
                )
                self.xi_b[m] = (
                    self.lam * self.xi_b[m]
                    + float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_g
                )

                # Conversion-factor order update; clamp so gamma stays strictly
                # positive for the divisions that follow.
                denom_xib = self.xi_b[m] + self._tiny
                gamma_next = gamma - (
                    float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_xib
                )
                gamma = max(gamma_next, self._tiny)
                alpha_f = alpha_f_next

            # Highest-order (M-th) energies, updated with the final gamma.
            gamma_orders[self.n_sections] = gamma
            self.xi_f[self.n_sections] = (
                self.lam * self.xi_f[self.n_sections]
                + float(np.real(alpha_f * np.conj(alpha_f))) / max(gamma, self._tiny)
            )
            self.xi_b[self.n_sections] = (
                self.lam * self.xi_b[self.n_sections]
                + float(np.real(alpha_b[self.n_sections] * np.conj(alpha_b[self.n_sections])))
                / max(gamma, self._tiny)
            )

            # -------------------------
            # Ladder stage (a priori)
            # -------------------------
            # Start from the desired sample and strip, order by order, the
            # component correlated with each backward error.
            alpha_e = complex(d_in[k])

            for m in range(self.n_sections + 1):
                denom_go = max(gamma_orders[m], self._tiny)

                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (alpha_b[m] * np.conj(alpha_e)) / denom_go
                )

                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)
                alpha_e = alpha_e - np.conj(self.v[m]) * alpha_b[m]

            # Scale the ladder a priori error by the final conversion factor
            # (e_pri[k] = gamma[k] * alpha_e[k], as documented above).
            e_k = alpha_e * gamma
            errors[k] = e_k
            outputs[k] = d_in[k] - e_k

            # Current backward errors become "previous" for the next sample.
            self.error_b_prev = alpha_b.copy()

            # Mirror ladder coeffs into base API + record history
            self.w = self.v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[LRLSPriori] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )

Executes LRLS adaptation (a priori form) over paired sequences x[k] and d[k].

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,). desired_signal : array_like of complex Desired/reference sequence d[k] with shape (N,). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, returns selected final internal states in result.extra (not full trajectories).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Estimated output sequence. In this implementation: outputs[k] = d[k] - e_pri[k]. - errors : ndarray of complex, shape (N,) A priori ladder error scaled by the final lattice normalization factor: e_pri[k] = gamma[k] * alpha_e[k]. - coefficients : ndarray Ladder coefficient history (mirrors self.v via self.w). - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True (see below).

Extra (when return_internal_states=True)

xi_f : ndarray of float, shape (M+1,) Final forward energies. xi_b : ndarray of float, shape (M+1,) Final backward energies. delta : ndarray of complex, shape (M,) Final lattice delta state. delta_v : ndarray of complex, shape (M+1,) Final ladder delta state used to compute v.

class NormalizedLRLS(pydaptivefiltering.AdaptiveFilter):
class NormalizedLRLS(AdaptiveFilter):
    """
    Normalized Lattice RLS (NLRLS) based on a posteriori error, complex-valued.

    Implements Diniz (Algorithm 7.6). This variant introduces *normalized*
    internal variables so that key quantities (normalized forward/backward errors
    and reflection-like coefficients) are designed to be magnitude-bounded by 1,
    improving numerical robustness.

    The algorithm has two coupled stages:

    1) **Prediction stage (lattice, order M)**:
       Computes normalized forward/backward a posteriori errors (``bar_f``, ``bar_b``)
       and updates normalized reflection-like coefficients ``rho``.

    2) **Estimation stage (normalized ladder, length M+1)**:
       Updates normalized coefficients ``rho_v`` and produces a normalized
       a posteriori error ``bar_e``. The returned error is the *de-normalized*
       error ``e = bar_e * xi_half``.

    Library conventions
    -------------------
    - Complex-valued implementation (``supports_complex=True``).
    - The exposed coefficient vector is ``rho_v`` (length ``M+1``).
      For compatibility with :class:`~pydaptivefiltering.base.AdaptiveFilter`:
        * ``self.w`` mirrors ``self.rho_v`` at each iteration.
        * history recorded by ``_record_history()`` corresponds to ``rho_v``.

    Parameters
    ----------
    filter_order : int
        Lattice order ``M`` (number of sections). The estimation stage uses
        ``M+1`` coefficients.
    lambda_factor : float, optional
        Forgetting factor ``lambda`` used in the exponentially weighted updates.
        Default is 0.99.
    epsilon : float, optional
        Small positive constant used for regularization in normalizations,
        magnitude clipping, and denominator protection. Default is 1e-6.
    w_init : array_like of complex, optional
        Optional initialization for ``rho_v`` with length ``M+1``.
        If None, initializes with zeros.
    denom_floor : float, optional
        Extra floor for denominators and sqrt protections. Default is 1e-12.

    Notes
    -----
    Normalized variables
    ~~~~~~~~~~~~~~~~~~~~
    The implementation uses the following normalized quantities:

    - ``xi_half``: square-root energy tracker (scalar). It normalizes the input/output
      so that normalized errors stay bounded.
    - ``bar_f``: normalized forward error for the current section.
    - ``bar_b_prev`` / ``bar_b_curr``: normalized backward error vectors, shape ``(M+1,)``.
    - ``bar_e``: normalized a posteriori error in the estimation stage.
    - ``rho``: normalized reflection-like coefficients for the lattice stage, shape ``(M,)``.
    - ``rho_v``: normalized coefficients for the estimation stage, shape ``(M+1,)``.

    Magnitude bounding
    ~~~~~~~~~~~~~~~~~~
    Several variables are clipped to satisfy ``|z| <= 1``. The terms
    ``sqrt(1 - |z|^2)`` act like cosine factors in the normalized recursions and
    are safeguarded with ``_safe_sqrt`` to avoid negative arguments caused by
    round-off.

    Output and error returned
    ~~~~~~~~~~~~~~~~~~~~~~~~~
    The filter returns the de-normalized a posteriori error:

    ``errors[k] = bar_e[k] * xi_half[k]``

    and the output estimate:

    ``outputs[k] = d[k] - errors[k]``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, Algorithm 7.6.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        lambda_factor: float = 0.99,
        epsilon: float = 1e-6,
        w_init: Optional[Union[np.ndarray, list]] = None,
        denom_floor: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            Number of lattice sections M. The estimation stage uses M+1 coefficients.
        lambda_factor:
            Forgetting factor λ.
        epsilon:
            Regularization used in normalizations and clipping.
        w_init:
            Optional initialization for rho_v (length M+1). If None, zeros.
        denom_floor:
            Extra floor for denominators / sqrt protections.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.lam = float(lambda_factor)
        self.epsilon = float(epsilon)
        self.n_sections = int(filter_order)
        self._tiny = float(denom_floor)

        # Normalized lattice reflection-like coefficients (one per section).
        self.rho = np.zeros(self.n_sections, dtype=complex)

        if w_init is not None:
            rho_v0 = np.asarray(w_init, dtype=complex).reshape(-1)
            if rho_v0.size != self.n_sections + 1:
                raise ValueError(
                    f"w_init must have length {self.n_sections + 1}, got {rho_v0.size}"
                )
            self.rho_v = rho_v0
        else:
            self.rho_v = np.zeros(self.n_sections + 1, dtype=complex)

        # Normalized backward-error vector carried over from the previous sample.
        self.bar_b_prev = np.zeros(self.n_sections + 1, dtype=complex)
        # Square-root energy tracker, seeded with sqrt(epsilon).
        self.xi_half = float(np.sqrt(self.epsilon))

        # Mirror rho_v into the base-class API and record the initial snapshot.
        self.w = self.rho_v.copy()
        self.w_history = []
        self._record_history()

    @staticmethod
    def _safe_sqrt(value: float) -> float:
        """
        Computes sqrt(max(value, 0.0)) to avoid negative arguments due to rounding.
        """
        return float(np.sqrt(max(0.0, float(value))))

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the Normalized LRLS (NLRLS) recursion over paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, returns selected *final* internal states in ``result.extra``
            (not full trajectories).

        Returns
        -------
        OptimizationResult
            outputs : ndarray of complex, shape ``(N,)``
                Estimated output sequence ``y[k] = d[k] - e_post[k]``.
            errors : ndarray of complex, shape ``(N,)``
                De-normalized a posteriori error ``e_post[k] = bar_e[k] * xi_half[k]``.
            coefficients : ndarray
                History of ``rho_v`` (mirrors ``self.rho_v`` via ``self.w``).
            error_type : str
                Set to ``"a_posteriori"``.
            extra : dict, optional
                Present only if ``return_internal_states=True`` (see below).

        Extra (when return_internal_states=True)
        --------------------------------------
        rho : ndarray of complex, shape ``(M,)``
            Final normalized lattice reflection-like coefficients.
        rho_v : ndarray of complex, shape ``(M+1,)``
            Final normalized estimation-stage coefficients.
        xi_half : float
            Final square-root energy tracker used for normalization.
        """
        t0 = perf_counter()

        # validate_input already normalizes to 1D and matches lengths.
        # Force complex to respect supports_complex=True (even if x/d are real).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        # Hoisted loop invariant: sqrt(lambda) used in every rho/rho_v update.
        sqrt_lam = float(np.sqrt(self.lam))

        for k in range(n_samples):
            # Update xi_half (sqrt energy): xi^2 <- lam * xi^2 + |x[k]|^2.
            xi_sq = float(self.xi_half**2)
            xi_sq = float(self.lam * xi_sq + (np.abs(x_in[k]) ** 2))
            self.xi_half = self._safe_sqrt(xi_sq)

            # Order-0 normalized forward error: input scaled by the energy tracker.
            denom_x = float(self.xi_half + self.epsilon)
            bar_f = x_in[k] / denom_x

            # Clip onto the unit disc so the cosine factors below stay real.
            abs_bf = np.abs(bar_f)
            if abs_bf > 1.0:
                bar_f = bar_f / abs_bf

            bar_b_curr = np.zeros(self.n_sections + 1, dtype=complex)
            bar_b_curr[0] = bar_f

            # -------------------------
            # Prediction stage
            # -------------------------
            for m in range(self.n_sections):
                # Cosine-like factors sqrt(1 - |z|^2), safeguarded against round-off.
                cos_f = self._safe_sqrt(1.0 - (np.abs(bar_f) ** 2))
                cos_b_prev = self._safe_sqrt(1.0 - (np.abs(self.bar_b_prev[m]) ** 2))

                self.rho[m] = (
                    (sqrt_lam * cos_f * cos_b_prev * self.rho[m])
                    + (np.conj(bar_f) * self.bar_b_prev[m])
                )

                # Re-clip rho so |rho| < 1 before forming its cosine factor.
                abs_rho = np.abs(self.rho[m])
                if abs_rho >= 1.0:
                    self.rho[m] = self.rho[m] / (abs_rho + self.epsilon)

                cos_rho = self._safe_sqrt(1.0 - (np.abs(self.rho[m]) ** 2))

                # epsilon keeps both denominators away from zero.
                denom_f = float((cos_rho * cos_b_prev) + self.epsilon)
                denom_b = float((cos_rho * cos_f) + self.epsilon)

                # Order-update of the normalized forward/backward errors.
                f_next = (bar_f - self.rho[m] * self.bar_b_prev[m]) / denom_f
                b_next = (self.bar_b_prev[m] - np.conj(self.rho[m]) * bar_f) / denom_b

                bar_f = f_next
                bar_b_curr[m + 1] = b_next

            # -------------------------
            # Estimation stage
            # -------------------------
            # Normalized a posteriori error starts from the scaled desired sample.
            bar_e = d_in[k] / float(self.xi_half + self.epsilon)
            abs_be = np.abs(bar_e)
            if abs_be > 1.0:
                bar_e = bar_e / abs_be

            for m in range(self.n_sections + 1):
                cos_e = self._safe_sqrt(1.0 - (np.abs(bar_e) ** 2))
                cos_b = self._safe_sqrt(1.0 - (np.abs(bar_b_curr[m]) ** 2))

                self.rho_v[m] = (
                    (sqrt_lam * cos_e * cos_b * self.rho_v[m])
                    + (np.conj(bar_e) * bar_b_curr[m])
                )

                # Same unit-disc clipping as in the prediction stage.
                abs_rv = np.abs(self.rho_v[m])
                if abs_rv >= 1.0:
                    self.rho_v[m] = self.rho_v[m] / (abs_rv + self.epsilon)

                cos_rho_v = self._safe_sqrt(1.0 - (np.abs(self.rho_v[m]) ** 2))

                denom_e = float((cos_rho_v * cos_b) + self.epsilon)
                bar_e = (bar_e - self.rho_v[m] * bar_b_curr[m]) / denom_e

            # De-normalize the a posteriori error and form the output estimate.
            errors[k] = bar_e * self.xi_half
            outputs[k] = d_in[k] - errors[k]

            # Current normalized backward errors become "previous" next sample.
            self.bar_b_prev = bar_b_curr.copy()

            self.w = self.rho_v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[NormalizedLRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "rho": self.rho.copy(),
                "rho_v": self.rho_v.copy(),
                "xi_half": self.xi_half,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )

Normalized Lattice RLS (NLRLS) based on a posteriori error, complex-valued.

Implements Diniz (Algorithm 7.6). This variant introduces normalized internal variables so that key quantities (normalized forward/backward errors and reflection-like coefficients) are designed to be magnitude-bounded by 1, improving numerical robustness.

The algorithm has two coupled stages:

1) Prediction stage (lattice, order M): Computes normalized forward/backward a posteriori errors (bar_f, bar_b) and updates normalized reflection-like coefficients rho.

2) Estimation stage (normalized ladder, length M+1): Updates normalized coefficients rho_v and produces a normalized a posteriori error bar_e. The returned error is the de-normalized error e = bar_e * xi_half.

Library conventions

  • Complex-valued implementation (supports_complex=True).
  • The exposed coefficient vector is rho_v (length M+1). For compatibility with ~pydaptivefiltering.base.AdaptiveFilter:
    • self.w mirrors self.rho_v at each iteration.
    • history recorded by _record_history() corresponds to rho_v.

Parameters

filter_order : int Lattice order M (number of sections). The estimation stage uses M+1 coefficients. lambda_factor : float, optional Forgetting factor lambda used in the exponentially weighted updates. Default is 0.99. epsilon : float, optional Small positive constant used for regularization in normalizations, magnitude clipping, and denominator protection. Default is 1e-6. w_init : array_like of complex, optional Optional initialization for rho_v with length M+1. If None, initializes with zeros. denom_floor : float, optional Extra floor for denominators and sqrt protections. Default is 1e-12.

Notes

Normalized variables — the implementation uses the following normalized quantities:

  • xi_half: square-root energy tracker (scalar). It normalizes the input/output so that normalized errors stay bounded.
  • bar_f: normalized forward error for the current section.
  • bar_b_prev / bar_b_curr: normalized backward error vectors, shape (M+1,).
  • bar_e: normalized a posteriori error in the estimation stage.
  • rho: normalized reflection-like coefficients for the lattice stage, shape (M,).
  • rho_v: normalized coefficients for the estimation stage, shape (M+1,).

Magnitude bounding — several variables are clipped to satisfy |z| <= 1. The terms sqrt(1 - |z|^2) act like cosine factors in the normalized recursions and are safeguarded with _safe_sqrt to avoid negative arguments caused by round-off.

Output and error returned — the filter returns the de-normalized a posteriori error:

errors[k] = bar_e[k] * xi_half[k]

and the output estimate:

outputs[k] = d[k] - errors[k].

References


NormalizedLRLS( filter_order: int, lambda_factor: float = 0.99, epsilon: float = 1e-06, w_init: Union[numpy.ndarray, list, NoneType] = None, denom_floor: float = 1e-12)
111    def __init__(
112        self,
113        filter_order: int,
114        lambda_factor: float = 0.99,
115        epsilon: float = 1e-6,
116        w_init: Optional[Union[np.ndarray, list]] = None,
117        denom_floor: float = 1e-12,
118    ) -> None:
119        """
120        Parameters
121        ----------
122        filter_order:
123            Number of lattice sections M. The estimation stage uses M+1 coefficients.
124        lambda_factor:
125            Forgetting factor λ.
126        epsilon:
127            Regularization used in normalizations and clipping.
128        w_init:
129            Optional initialization for rho_v (length M+1). If None, zeros.
130        denom_floor:
131            Extra floor for denominators / sqrt protections.
132        """
133        super().__init__(filter_order=filter_order, w_init=w_init)
134
135        self.lam = float(lambda_factor)
136        self.epsilon = float(epsilon)
137        self.n_sections = int(filter_order)
138        self._tiny = float(denom_floor)
139
140        self.rho = np.zeros(self.n_sections, dtype=complex)
141
142        if w_init is not None:
143            rho_v0 = np.asarray(w_init, dtype=complex).reshape(-1)
144            if rho_v0.size != self.n_sections + 1:
145                raise ValueError(
146                    f"w_init must have length {self.n_sections + 1}, got {rho_v0.size}"
147                )
148            self.rho_v = rho_v0
149        else:
150            self.rho_v = np.zeros(self.n_sections + 1, dtype=complex)
151
152        self.bar_b_prev = np.zeros(self.n_sections + 1, dtype=complex)
153        self.xi_half = float(np.sqrt(self.epsilon))
154
155        self.w = self.rho_v.copy()
156        self.w_history = []
157        self._record_history()

Parameters

filter_order: Number of lattice sections M. The estimation stage uses M+1 coefficients. lambda_factor: Forgetting factor λ. epsilon: Regularization used in normalizations and clipping. w_init: Optional initialization for rho_v (length M+1). If None, zeros. denom_floor: Extra floor for denominators / sqrt protections.

supports_complex: bool = True
lam
epsilon
n_sections
rho
bar_b_prev
xi_half
w
w_history
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
166    @validate_input
167    def optimize(
168        self,
169        input_signal: np.ndarray,
170        desired_signal: np.ndarray,
171        verbose: bool = False,
172        return_internal_states: bool = False,
173    ) -> OptimizationResult:
174        """
175        Run the Normalized LRLS (NLRLS) recursion over paired sequences ``x[k]`` and ``d[k]``.
176
177        Parameters
178        ----------
179        input_signal : array_like of complex
180            Input sequence ``x[k]`` with shape ``(N,)``.
181        desired_signal : array_like of complex
182            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
183        verbose : bool, optional
184            If True, prints the total runtime after completion.
185        return_internal_states : bool, optional
186            If True, returns selected *final* internal states in ``result.extra``
187            (not full trajectories).
188
189        Returns
190        -------
191        OptimizationResult
192            outputs : ndarray of complex, shape ``(N,)``
193                Estimated output sequence ``y[k] = d[k] - e_post[k]``.
194            errors : ndarray of complex, shape ``(N,)``
195                De-normalized a posteriori error ``e_post[k] = bar_e[k] * xi_half[k]``.
196            coefficients : ndarray
197                History of ``rho_v`` (mirrors ``self.rho_v`` via ``self.w``).
198            error_type : str
199                Set to ``"a_posteriori"``.
200            extra : dict, optional
201                Present only if ``return_internal_states=True`` (see below).
202
203        Extra (when return_internal_states=True)
204        --------------------------------------
205        rho : ndarray of complex, shape ``(M,)``
206            Final normalized lattice reflection-like coefficients.
207        rho_v : ndarray of complex, shape ``(M+1,)``
208            Final normalized estimation-stage coefficients.
209        xi_half : float
210            Final square-root energy tracker used for normalization.
211        """
212        t0 = perf_counter()
213
214        # validate_input already normalizes to 1D and matches lengths.
215        # Force complex to respect supports_complex=True (even if x/d are real).
216        x_in = np.asarray(input_signal, dtype=complex).ravel()
217        d_in = np.asarray(desired_signal, dtype=complex).ravel()
218
219        n_samples = int(d_in.size)
220        outputs = np.zeros(n_samples, dtype=complex)
221        errors = np.zeros(n_samples, dtype=complex)
222
223        sqrt_lam = float(np.sqrt(self.lam))
224
225        for k in range(n_samples):
226            # Update xi_half (sqrt energy)
227            xi_sq = float(self.xi_half**2)
228            xi_sq = float(self.lam * xi_sq + (np.abs(x_in[k]) ** 2))
229            self.xi_half = self._safe_sqrt(xi_sq)
230
231            denom_x = float(self.xi_half + self.epsilon)
232            bar_f = x_in[k] / denom_x
233
234            abs_bf = np.abs(bar_f)
235            if abs_bf > 1.0:
236                bar_f = bar_f / abs_bf
237
238            bar_b_curr = np.zeros(self.n_sections + 1, dtype=complex)
239            bar_b_curr[0] = bar_f
240
241            # -------------------------
242            # Prediction stage
243            # -------------------------
244            for m in range(self.n_sections):
245                cos_f = self._safe_sqrt(1.0 - (np.abs(bar_f) ** 2))
246                cos_b_prev = self._safe_sqrt(1.0 - (np.abs(self.bar_b_prev[m]) ** 2))
247
248                self.rho[m] = (
249                    (sqrt_lam * cos_f * cos_b_prev * self.rho[m])
250                    + (np.conj(bar_f) * self.bar_b_prev[m])
251                )
252
253                abs_rho = np.abs(self.rho[m])
254                if abs_rho >= 1.0:
255                    self.rho[m] = self.rho[m] / (abs_rho + self.epsilon)
256
257                cos_rho = self._safe_sqrt(1.0 - (np.abs(self.rho[m]) ** 2))
258
259                denom_f = float((cos_rho * cos_b_prev) + self.epsilon)
260                denom_b = float((cos_rho * cos_f) + self.epsilon)
261
262                f_next = (bar_f - self.rho[m] * self.bar_b_prev[m]) / denom_f
263                b_next = (self.bar_b_prev[m] - np.conj(self.rho[m]) * bar_f) / denom_b
264
265                bar_f = f_next
266                bar_b_curr[m + 1] = b_next
267
268            # -------------------------
269            # Estimation stage
270            # -------------------------
271            bar_e = d_in[k] / float(self.xi_half + self.epsilon)
272            abs_be = np.abs(bar_e)
273            if abs_be > 1.0:
274                bar_e = bar_e / abs_be
275
276            for m in range(self.n_sections + 1):
277                cos_e = self._safe_sqrt(1.0 - (np.abs(bar_e) ** 2))
278                cos_b = self._safe_sqrt(1.0 - (np.abs(bar_b_curr[m]) ** 2))
279
280                self.rho_v[m] = (
281                    (sqrt_lam * cos_e * cos_b * self.rho_v[m])
282                    + (np.conj(bar_e) * bar_b_curr[m])
283                )
284
285                abs_rv = np.abs(self.rho_v[m])
286                if abs_rv >= 1.0:
287                    self.rho_v[m] = self.rho_v[m] / (abs_rv + self.epsilon)
288
289                cos_rho_v = self._safe_sqrt(1.0 - (np.abs(self.rho_v[m]) ** 2))
290
291                denom_e = float((cos_rho_v * cos_b) + self.epsilon)
292                bar_e = (bar_e - self.rho_v[m] * bar_b_curr[m]) / denom_e
293
294            errors[k] = bar_e * self.xi_half
295            outputs[k] = d_in[k] - errors[k]
296
297            self.bar_b_prev = bar_b_curr.copy()
298
299            self.w = self.rho_v.copy()
300            self._record_history()
301
302        runtime_s = float(perf_counter() - t0)
303        if verbose:
304            print(f"[NormalizedLRLS] Completed in {runtime_s * 1000:.02f} ms")
305
306        extra: Optional[Dict[str, Any]] = None
307        if return_internal_states:
308            extra = {
309                "rho": self.rho.copy(),
310                "rho_v": self.rho_v.copy(),
311                "xi_half": self.xi_half,
312            }
313
314        return self._pack_results(
315            outputs=outputs,
316            errors=errors,
317            runtime_s=runtime_s,
318            error_type="a_posteriori",
319            extra=extra,
320        )

Run the Normalized LRLS (NLRLS) recursion over paired sequences x[k] and d[k].

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,). desired_signal : array_like of complex Desired/reference sequence d[k] with shape (N,). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, returns selected final internal states in result.extra (not full trajectories).

Returns

OptimizationResult outputs : ndarray of complex, shape (N,) Estimated output sequence y[k] = d[k] - e_post[k]. errors : ndarray of complex, shape (N,) De-normalized a posteriori error e_post[k] = bar_e[k] * xi_half[k]. coefficients : ndarray History of rho_v (mirrors self.rho_v via self.w). error_type : str Set to "a_posteriori". extra : dict, optional Present only if return_internal_states=True (see below).

Extra (when return_internal_states=True)

rho : ndarray of complex, shape (M,) Final normalized lattice reflection-like coefficients. rho_v : ndarray of complex, shape (M+1,) Final normalized estimation-stage coefficients. xi_half : float Final square-root energy tracker used for normalization.

class FastRLS(AdaptiveFilter):
    """
    Fast Transversal Recursive Least-Squares (FT-RLS), complex-valued.

    FT-RLS is a computationally efficient alternative to standard RLS: by
    exploiting the shift structure of the regressor through coupled
    forward/backward linear-prediction recursions, the per-sample cost drops
    from :math:`O(M^2)` to roughly :math:`O(M)`. The implementation follows
    Diniz (Alg. 8.1) and keeps internal state for both predictors plus the
    conversion (likelihood) variable :math:`\\gamma(k)` that maps a priori
    quantities to a posteriori ones.

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M``; the filter has ``M + 1`` coefficients.
    forgetting_factor : float, optional
        Exponential forgetting factor ``lambda`` (typically in ``[0.95, 1.0]``;
        closer to 1 means longer memory). Default 0.99.
    epsilon : float, optional
        Positive initialization :math:`\\xi_{\\min}(0)` for the minimum
        prediction-error energies (regularization). Default 0.1.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` of shape ``(M + 1,)``; zeros if None.
    safe_eps : float, optional (keyword-only)
        Small guard used when internal denominators approach zero.
        Default 1e-30.

    Notes
    -----
    At time ``k`` the regressor is ordered most-recent-first,
    ``x_k = [x[k], x[k-1], ..., x[k-M]]^T``. The a priori output and error are
    ``y(k) = w^H(k-1) x_k`` and ``e(k) = d(k) - y(k)``; the a posteriori error
    is obtained through the conversion variable,
    ``e_post(k) = gamma(k) e(k)``, with ``y_post(k) = d(k) - e_post(k)``.
    The main filter is updated with the normalized gain-like vector
    (``phi_hat_n`` below): ``w(k) = w(k-1) + phi(k) e_post^*(k)``.

    ``result.extra`` always carries ``outputs_posteriori`` and
    ``errors_posteriori``; with ``return_internal_states=True`` it also carries
    the ``gamma`` and forward-energy ``xi_min_f`` tracks.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 8.1.
    """
    supports_complex: bool = True
    forgetting_factor: float
    epsilon: float
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        forgetting_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[Union[np.ndarray, list]] = None,
        *,
        safe_eps: float = 1e-30,
    ) -> None:
        super().__init__(filter_order=filter_order, w_init=w_init)
        self.forgetting_factor = float(forgetting_factor)
        self.epsilon = float(epsilon)
        self.n_coeffs = int(filter_order + 1)
        self._safe_eps = float(safe_eps)

        # FT-RLS recursions are complex-valued; promote the base-class weights.
        self.w = np.asarray(self.w, dtype=np.complex128)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Execute the FT-RLS adaptation loop.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (flattened).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` with shape ``(N,)`` (flattened); same
            length as ``input_signal``.
        verbose : bool, optional
            When True, print the total runtime once finished.
        return_internal_states : bool, optional
            When True, also store per-sample ``gamma`` and forward
            minimum-energy ``xi_min_f`` tracks in ``result.extra``.

        Returns
        -------
        OptimizationResult
            ``outputs``/``errors`` are the a priori sequences
            (``error_type="a_priori"``); ``coefficients`` is the history
            recorded by the base class; ``extra`` always includes
            ``outputs_posteriori`` and ``errors_posteriori``.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.complex128).ravel()
        d: np.ndarray = np.asarray(desired_signal, dtype=np.complex128).ravel()

        n_samples: int = int(x.size)
        m_plus_1: int = int(self.filter_order + 1)

        outputs = np.zeros(n_samples, dtype=np.complex128)
        errors = np.zeros(n_samples, dtype=np.complex128)
        outputs_post = np.zeros(n_samples, dtype=np.complex128)
        errors_post = np.zeros(n_samples, dtype=np.complex128)

        gamma_track: Optional[np.ndarray] = (
            np.zeros(n_samples, dtype=float) if return_internal_states else None
        )
        xi_f_track: Optional[np.ndarray] = (
            np.zeros(n_samples, dtype=float) if return_internal_states else None
        )

        # Forward/backward predictors and the normalized gain-like vector.
        w_f = np.zeros(m_plus_1, dtype=np.complex128)
        w_b = np.zeros(m_plus_1, dtype=np.complex128)
        phi_hat_n = np.zeros(m_plus_1, dtype=np.complex128)

        gamma_n: float = 1.0
        xi_min_f_prev: float = float(self.epsilon)
        xi_min_b: float = float(self.epsilon)

        # Zero-padded input so early regressors read zeros for x[k<0].
        x_padded = np.zeros(n_samples + m_plus_1, dtype=np.complex128)
        x_padded[m_plus_1:] = x

        lam = float(self.forgetting_factor)
        eps = float(self._safe_eps)

        def _clamp(value: float) -> float:
            # Keep a denominator away from zero while preserving its sign.
            if abs(value) < eps:
                return np.copysign(eps, value if value != 0 else 1.0)
            return value

        for k in range(n_samples):
            # Most-recent-first regressor of length M+2 (order-(M+1) predictor).
            x_vec = x_padded[k : k + m_plus_1 + 1][::-1]

            # --- Forward prediction ---
            ef_ap = x_vec[0] - np.dot(w_f.conj(), x_vec[1:])
            ef_post = ef_ap * gamma_n
            xi_min_f_curr = float(lam * xi_min_f_prev + np.real(ef_ap * np.conj(ef_post)))

            gain = ef_ap / _clamp(lam * xi_min_f_prev)

            # Order-extended gain vector built from the forward predictor.
            phi_ext = np.empty(m_plus_1 + 1, dtype=np.complex128)
            phi_ext[0] = gain
            phi_ext[1:] = phi_hat_n - gain * w_f

            w_f = w_f + phi_hat_n * np.conj(ef_post)

            gamma_ext = float((lam * xi_min_f_prev * gamma_n) / _clamp(xi_min_f_curr))

            # --- Backward prediction ---
            eb_ap = lam * xi_min_b * phi_ext[-1]

            inv_gamma = np.real((1.0 / gamma_ext) - (phi_ext[-1] * np.conj(eb_ap)))
            gamma_n = float(1.0 / _clamp(inv_gamma))

            eb_post = eb_ap * gamma_n
            xi_min_b = float(lam * xi_min_b + np.real(eb_post * np.conj(eb_ap)))

            phi_hat_n = phi_ext[:-1] + phi_ext[-1] * w_b
            w_b = w_b + phi_hat_n * np.conj(eb_post)

            # --- Joint-process (main) filter ---
            y_ap = np.dot(self.w.conj(), x_vec[:-1])
            outputs[k] = y_ap
            errors[k] = d[k] - y_ap

            errors_post[k] = errors[k] * gamma_n
            outputs_post[k] = d[k] - errors_post[k]

            self.w = self.w + phi_hat_n * np.conj(errors_post[k])
            self._record_history()

            if return_internal_states and gamma_track is not None and xi_f_track is not None:
                gamma_track[k] = gamma_n
                xi_f_track[k] = xi_min_f_curr

            xi_min_f_prev = xi_min_f_curr

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[FastRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "outputs_posteriori": outputs_post,
            "errors_posteriori": errors_post,
        }
        if return_internal_states:
            extra.update({"gamma": gamma_track, "xi_min_f": xi_f_track})

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )

Fast Transversal Recursive Least-Squares (FT-RLS) algorithm (complex-valued).

The Fast Transversal RLS (also called Fast RLS) is a computationally efficient alternative to standard RLS. By exploiting shift-structure in the regressor and using coupled forward/backward linear prediction recursions, it reduces the per-sample complexity from \( O(M^2) \) (standard RLS) to approximately \( O(M) \).

This implementation follows Diniz (Alg. 8.1) and maintains internal state for forward and backward predictors, as well as the conversion (likelihood) variable \( \gamma(k) \) that maps a priori to a posteriori quantities.

Parameters

filter_order : int FIR filter order M. The number of coefficients is M + 1. forgetting_factor : float, optional Exponential forgetting factor lambda. Typical values are in [0.95, 1.0]; values closer to 1 give longer memory. Default is 0.99. epsilon : float, optional Positive initialization for the minimum prediction-error energies (regularization), used as \( \xi_{\min}(0) \) in the recursions. Default is 0.1. w_init : array_like of complex, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros. safe_eps : float, optional (keyword-only) Small constant used to guard divisions in internal recursions when denominators approach zero. Default is 1e-30.

Notes

Convention: at time k, the regressor is formed (most recent sample first) as:

$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T.$$

A priori vs. a posteriori: the a priori output and error are:

$$y(k) = w^H(k-1) x_k, \qquad e(k) = d(k) - y(k).$$

This implementation also computes the a posteriori error using the conversion variable \( \gamma(k) \) (from the FT-RLS recursions):

$$e_{\text{post}}(k) = \gamma(k)\, e(k), \qquad y_{\text{post}}(k) = d(k) - e_{\text{post}}(k).$$

The main-filter coefficient update uses the normalized gain-like vector produced by the transversal recursions (phi_hat_n in the code):

$$w(k) = w(k-1) + \phi(k)\, e_{\text{post}}^*(k),$$

where \( \phi(k) \) corresponds to the internal vector phi_hat_n.

Returned internals: the method always returns a posteriori sequences in extra (outputs_posteriori and errors_posteriori). If return_internal_states=True, it additionally returns tracks of gamma and the forward minimum prediction-error energy xi_min_f.

References

[1] P. S. R. Diniz, Adaptive Filtering: Algorithms and Practical Implementation, 5th ed., Algorithm 8.1.

FastRLS( filter_order: int, forgetting_factor: float = 0.99, epsilon: float = 0.1, w_init: Union[numpy.ndarray, list, NoneType] = None, *, safe_eps: float = 1e-30)
105    def __init__(
106        self,
107        filter_order: int,
108        forgetting_factor: float = 0.99,
109        epsilon: float = 0.1,
110        w_init: Optional[Union[np.ndarray, list]] = None,
111        *,
112        safe_eps: float = 1e-30,
113    ) -> None:
114        super().__init__(filter_order=filter_order, w_init=w_init)
115        self.forgetting_factor = float(forgetting_factor)
116        self.epsilon = float(epsilon)
117        self.n_coeffs = int(filter_order + 1)
118        self._safe_eps = float(safe_eps)
119
120        self.w = np.asarray(self.w, dtype=np.complex128)
supports_complex: bool = True
forgetting_factor: float
epsilon: float
n_coeffs: int
w
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
122    @validate_input
123    def optimize(
124        self,
125        input_signal: np.ndarray,
126        desired_signal: np.ndarray,
127        verbose: bool = False,
128        return_internal_states: bool = False,
129    ) -> OptimizationResult:
130        """
131        Executes the FT-RLS adaptation loop.
132
133        Parameters
134        ----------
135        input_signal : array_like of complex
136            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
137        desired_signal : array_like of complex
138            Desired/reference sequence ``d[k]`` with shape ``(N,)`` (will be
139            flattened). Must have the same length as ``input_signal``.
140        verbose : bool, optional
141            If True, prints the total runtime after completion.
142        return_internal_states : bool, optional
143            If True, includes additional internal trajectories in
144            ``result.extra``:
145            - ``"gamma"``: ndarray of float, shape ``(N,)`` with :math:`\\gamma(k)`.
146            - ``"xi_min_f"``: ndarray of float, shape ``(N,)`` with the forward
147              minimum prediction-error energy :math:`\\xi_{f,\\min}(k)`.
148
149        Returns
150        -------
151        OptimizationResult
152            Result object with fields:
153            - outputs : ndarray of complex, shape ``(N,)``
154                A priori output sequence ``y[k] = w^H(k-1) x_k``.
155            - errors : ndarray of complex, shape ``(N,)``
156                A priori error sequence ``e[k] = d[k] - y[k]``.
157            - coefficients : ndarray of complex
158                Coefficient history recorded by the base class.
159            - error_type : str
160                Set to ``"a_priori"``.
161            - extra : dict
162                Always includes:
163                - ``"outputs_posteriori"``: ndarray of complex, shape ``(N,)``.
164                - ``"errors_posteriori"``: ndarray of complex, shape ``(N,)``.
165                Additionally includes ``"gamma"`` and ``"xi_min_f"`` if
166                ``return_internal_states=True``.
167        """
168        tic: float = time()
169
170        x: np.ndarray = np.asarray(input_signal, dtype=np.complex128).ravel()
171        d: np.ndarray = np.asarray(desired_signal, dtype=np.complex128).ravel()
172
173        n_samples: int = int(x.size)
174        m_plus_1: int = int(self.filter_order + 1)
175
176        outputs: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
177        errors: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
178        outputs_post: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
179        errors_post: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
180
181        gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
182        xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
183
184        w_f: np.ndarray = np.zeros(m_plus_1, dtype=np.complex128)
185        w_b: np.ndarray = np.zeros(m_plus_1, dtype=np.complex128)
186        phi_hat_n: np.ndarray = np.zeros(m_plus_1, dtype=np.complex128)
187
188        gamma_n: float = 1.0
189        xi_min_f_prev: float = float(self.epsilon)
190        xi_min_b: float = float(self.epsilon)
191
192        x_padded: np.ndarray = np.zeros(n_samples + m_plus_1, dtype=np.complex128)
193        x_padded[m_plus_1:] = x
194
195        lam = float(self.forgetting_factor)
196        eps = float(self._safe_eps)
197
198        for k in range(n_samples):
199            regressor: np.ndarray = x_padded[k : k + m_plus_1 + 1][::-1]
200
201            e_f_priori: np.complex128 = regressor[0] - np.dot(w_f.conj(), regressor[1:])
202            e_f_post: np.complex128 = e_f_priori * gamma_n
203
204            xi_min_f_curr: float = float(lam * xi_min_f_prev + np.real(e_f_priori * np.conj(e_f_post)))
205
206            den_phi = lam * xi_min_f_prev
207            if abs(den_phi) < eps:
208                den_phi = np.copysign(eps, den_phi if den_phi != 0 else 1.0)
209            phi_gain: np.complex128 = e_f_priori / den_phi
210
211            phi_hat_n_plus_1: np.ndarray = np.zeros(m_plus_1 + 1, dtype=np.complex128)
212            phi_hat_n_plus_1[1:] = phi_hat_n
213            phi_hat_n_plus_1[0] += phi_gain
214            phi_hat_n_plus_1[1:] -= phi_gain * w_f
215
216            w_f = w_f + phi_hat_n * np.conj(e_f_post)
217
218            den_g = xi_min_f_curr
219            if abs(den_g) < eps:
220                den_g = np.copysign(eps, den_g if den_g != 0 else 1.0)
221            gamma_n_plus_1: float = float((lam * xi_min_f_prev * gamma_n) / den_g)
222
223            e_b_priori: np.complex128 = lam * xi_min_b * phi_hat_n_plus_1[-1]
224
225            den_gamma = np.real((1.0 / gamma_n_plus_1) - (phi_hat_n_plus_1[-1] * np.conj(e_b_priori)))
226            if abs(den_gamma) < eps:
227                den_gamma = np.copysign(eps, den_gamma if den_gamma != 0 else 1.0)
228            gamma_n = float(1.0 / den_gamma)
229
230            e_b_post: np.complex128 = e_b_priori * gamma_n
231            xi_min_b = float(lam * xi_min_b + np.real(e_b_post * np.conj(e_b_priori)))
232
233            phi_hat_n = phi_hat_n_plus_1[:-1] + phi_hat_n_plus_1[-1] * w_b
234            w_b = w_b + phi_hat_n * np.conj(e_b_post)
235
236            y_k: np.complex128 = np.dot(self.w.conj(), regressor[:-1])
237            outputs[k] = y_k
238
239            e_k: np.complex128 = d[k] - y_k
240            errors[k] = e_k
241
242            errors_post[k] = e_k * gamma_n
243            outputs_post[k] = d[k] - errors_post[k]
244
245            self.w = self.w + phi_hat_n * np.conj(errors_post[k])
246            self._record_history()
247
248            if return_internal_states and gamma_track is not None and xi_f_track is not None:
249                gamma_track[k] = gamma_n
250                xi_f_track[k] = xi_min_f_curr
251
252            xi_min_f_prev = xi_min_f_curr
253
254        runtime_s: float = float(time() - tic)
255        if verbose:
256            print(f"[FastRLS] Completed in {runtime_s * 1000:.02f} ms")
257
258        extra: Dict[str, Any] = {
259            "outputs_posteriori": outputs_post,
260            "errors_posteriori": errors_post,
261        }
262        if return_internal_states:
263            extra.update({"gamma": gamma_track, "xi_min_f": xi_f_track})
264
265        return self._pack_results(
266            outputs=outputs,
267            errors=errors,
268            runtime_s=runtime_s,
269            error_type="a_priori",
270            extra=extra,
271        )

Executes the FT-RLS adaptation loop.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of complex Desired/reference sequence d[k] with shape (N,) (will be flattened). Must have the same length as input_signal. verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes additional internal trajectories in result.extra: - "gamma": ndarray of float, shape (N,) with \( \gamma(k) \). - "xi_min_f": ndarray of float, shape (N,) with the forward minimum prediction-error energy \( \xi_{f,\min}(k) \).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) A priori output sequence y[k] = w^H(k-1) x_k. - errors : ndarray of complex, shape (N,) A priori error sequence e[k] = d[k] - y[k]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict Always includes: - "outputs_posteriori": ndarray of complex, shape (N,). - "errors_posteriori": ndarray of complex, shape (N,). Additionally includes "gamma" and "xi_min_f" if return_internal_states=True.

class StabFastRLS(pydaptivefiltering.AdaptiveFilter):
 26class StabFastRLS(AdaptiveFilter):
 27    """
 28    Stabilized Fast Transversal RLS (SFT-RLS) algorithm (real-valued).
 29
 30    The Stabilized Fast Transversal RLS is a numerically robust variant of the
 31    Fast Transversal RLS. It preserves the approximately :math:`O(M)` per-sample
 32    complexity of transversal RLS recursions while improving stability in
 33    finite-precision arithmetic by introducing feedback stabilization in the
 34    backward prediction recursion (via ``kappa1``, ``kappa2``, ``kappa3``) and by
 35    guarding divisions/energies through floors and optional clipping.
 36
 37    This implementation corresponds to Diniz (Alg. 8.2) and is restricted to
 38    **real-valued** input/desired sequences (enforced by ``ensure_real_signals``).
 39
 40    Parameters
 41    ----------
 42    filter_order : int
 43        FIR filter order ``M``. The number of coefficients is ``M + 1``.
 44    forgetting_factor : float, optional
 45        Exponential forgetting factor ``lambda``. Default is 0.99.
 46    epsilon : float, optional
 47        Positive initialization for the minimum prediction-error energies
 48        (regularization), used as :math:`\\xi_{\\min}(0)` in the recursions.
 49        Default is 1e-1.
 50    kappa1, kappa2, kappa3 : float, optional
 51        Stabilization constants used to form stabilized versions of the backward
 52        prediction error. Defaults are 1.5, 2.5, and 1.0.
 53    w_init : array_like of float, optional
 54        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 55        initializes with zeros.
 56    denom_floor : float, optional
 57        Safety floor used to clamp denominators before inversion to prevent
 58        overflow/underflow and non-finite values during internal recursions.
 59        If None, a small value based on machine ``tiny`` is used.
 60    xi_floor : float, optional
 61        Safety floor for prediction error energies (e.g., ``xi_min_f``,
 62        ``xi_min_b``). If None, a small value based on machine ``tiny`` is used.
 63    gamma_clip : float, optional
 64        Optional clipping threshold applied to an intermediate conversion factor
 65        to avoid extreme values (singularities). If None, no clipping is applied.
 66
 67    Notes
 68    -----
 69    Convention
 70    ~~~~~~~~~~
 71    At time ``k``, the internal regressor window has length ``M + 2`` (denoted
 72    ``r`` in the code) and is formed in reverse order (most recent sample first).
 73    The main adaptive filter uses the first ``M + 1`` entries of this window.
 74
 75    A priori vs a posteriori
 76    ~~~~~~~~~~~~~~~~~~~~~~~~
 77    The a priori output and error are:
 78
 79    .. math::
 80        y(k) = w^T(k-1) x_k, \\qquad e(k) = d(k) - y(k),
 81
 82    and the a posteriori error returned by this implementation is:
 83
 84    .. math::
 85        e_{\\text{post}}(k) = \\gamma(k)\\, e(k),
 86
 87    where :math:`\\gamma(k)` is produced by the stabilized transversal recursions.
 88
 89    Stabilization with kappa
 90    ~~~~~~~~~~~~~~~~~~~~~~~~
 91    The algorithm forms stabilized backward-error combinations (three variants)
 92    from two backward-error lines in the recursion (named ``e_b_line1`` and
 93    ``e_b_line2`` in the code). Conceptually:
 94
 95    .. math::
 96        e_{b,i}(k) = \\kappa_i\\, e_{b,2}(k) + (1-\\kappa_i)\\, e_{b,1}(k),
 97
 98    for :math:`\\kappa_i \\in \\{\\kappa_1, \\kappa_2, \\kappa_3\\}`.
 99
100    Numerical safeguards
101    ~~~~~~~~~~~~~~~~~~~~
102    Several denominators are clamped to ``denom_floor`` before inversion and
103    minimum energies are floored by ``xi_floor``. The counts of clamp events are
104    tracked and returned in ``extra["clamp_stats"]``.
105
106    References
107    ----------
108    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
109    Implementation*, 5th ed., Algorithm 8.2.
110    """
111    supports_complex: bool = False
112    lambda_: float
113    epsilon: float
114    kappa1: float
115    kappa2: float
116    kappa3: float
117    denom_floor: float
118    xi_floor: float
119    gamma_clip: Optional[float]
120    n_coeffs: int
121
122    def __init__(
123        self,
124        filter_order: int,
125        forgetting_factor: float = 0.99,
126        epsilon: float = 1e-1,
127        kappa1: float = 1.5,
128        kappa2: float = 2.5,
129        kappa3: float = 1.0,
130        w_init: Optional[Union[np.ndarray, list]] = None,
131        denom_floor: Optional[float] = None,
132        xi_floor: Optional[float] = None,
133        gamma_clip: Optional[float] = None,
134    ) -> None:
135        super().__init__(filter_order=filter_order, w_init=w_init)
136
137        self.filter_order = int(filter_order)
138        self.n_coeffs = int(self.filter_order + 1)
139        self.lambda_ = float(forgetting_factor)
140        self.epsilon = float(epsilon)
141        self.kappa1 = float(kappa1)
142        self.kappa2 = float(kappa2)
143        self.kappa3 = float(kappa3)
144
145        finfo = np.finfo(np.float64)
146        self.denom_floor = float(denom_floor) if denom_floor is not None else float(finfo.tiny * 1e3)
147        self.xi_floor = float(xi_floor) if xi_floor is not None else float(finfo.tiny * 1e6)
148        self.gamma_clip = float(gamma_clip) if gamma_clip is not None else None
149
150        self.w = np.asarray(self.w, dtype=np.float64)
151
152    @staticmethod
153    def _clamp_denom(den: float, floor: float) -> float:
154        if (not np.isfinite(den)) or (abs(den) < floor):
155            return float(np.copysign(floor, den if den != 0 else 1.0))
156        return float(den)
157
158    def _safe_inv(self, den: float, floor: float, clamp_counter: Dict[str, int], key: str) -> float:
159        den_clamped = self._clamp_denom(den, floor)
160        if den_clamped != den:
161            clamp_counter[key] = clamp_counter.get(key, 0) + 1
162        return 1.0 / den_clamped
163
164    @ensure_real_signals
165    @validate_input
166    def optimize(
167        self,
168        input_signal: np.ndarray,
169        desired_signal: np.ndarray,
170        verbose: bool = False,
171        return_internal_states: bool = False,
172    ) -> OptimizationResult:
173        """
174        Executes the stabilized FT-RLS adaptation loop (real-valued).
175
176        Parameters
177        ----------
178        input_signal : array_like of float
179            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
180        desired_signal : array_like of float
181            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
182            Must have the same length as ``input_signal``.
183        verbose : bool, optional
184            If True, prints the total runtime after completion.
185        return_internal_states : bool, optional
186            If True, includes internal trajectories in ``result.extra``:
187            - ``"xi_min_f"``: ndarray of float, shape ``(N,)`` (forward minimum
188              prediction-error energy).
189            - ``"xi_min_b"``: ndarray of float, shape ``(N,)`` (backward minimum
190              prediction-error energy).
191            - ``"gamma"``: ndarray of float, shape ``(N,)`` (conversion factor).
192
193        Returns
194        -------
195        OptimizationResult
196            Result object with fields:
197            - outputs : ndarray of float, shape ``(N,)``
198                A priori output sequence ``y[k]``.
199            - errors : ndarray of float, shape ``(N,)``
200                A priori error sequence ``e[k] = d[k] - y[k]``.
201            - coefficients : ndarray of float
202                Coefficient history recorded by the base class.
203            - error_type : str
204                Set to ``"a_priori"``.
205            - extra : dict
206                Always includes:
207                - ``"errors_posteriori"``: ndarray of float, shape ``(N,)`` with
208                  :math:`e_{\\text{post}}(k)`.
209                - ``"clamp_stats"``: dict with counters of denominator clamps.
210                Additionally includes ``"xi_min_f"``, ``"xi_min_b"``, and
211                ``"gamma"`` if ``return_internal_states=True``.
212        """
213        tic: float = time()
214
215        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
216        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
217
218        n_samples: int = int(x.size)
219        n_taps: int = int(self.filter_order + 1)
220        reg_len: int = int(self.filter_order + 2)
221
222        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
223        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
224        errors_post: np.ndarray = np.zeros(n_samples, dtype=np.float64)
225
226        xi_min_f: float = float(self.epsilon)
227        xi_min_b: float = float(self.epsilon)
228        gamma_n_3: float = 1.0
229
230        xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
231        xi_b_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
232        gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
233
234        w_f: np.ndarray = np.zeros(n_taps, dtype=np.float64)
235        w_b: np.ndarray = np.zeros(n_taps, dtype=np.float64)
236        phi_hat_n: np.ndarray = np.zeros(n_taps, dtype=np.float64)
237        phi_hat_np1: np.ndarray = np.zeros(reg_len, dtype=np.float64)
238
239        x_padded: np.ndarray = np.zeros(n_samples + n_taps, dtype=np.float64)
240        x_padded[n_taps:] = x
241
242        clamp_counter: Dict[str, int] = {}
243
244        for k in range(n_samples):
245            r: np.ndarray = x_padded[k : k + reg_len][::-1]
246
247            e_f_priori: float = float(r[0] - np.dot(w_f, r[1:]))
248            e_f_post: float = float(e_f_priori * gamma_n_3)
249
250            scale: float = self._safe_inv(self.lambda_ * xi_min_f, self.denom_floor, clamp_counter, "inv_lam_xi_f")
251            phi_hat_np1[0] = scale * e_f_priori
252            phi_hat_np1[1:] = phi_hat_n - phi_hat_np1[0] * w_f
253
254            inv_g3: float = self._safe_inv(gamma_n_3, self.denom_floor, clamp_counter, "inv_g3")
255            gamma_np1_1: float = self._safe_inv(
256                inv_g3 + phi_hat_np1[0] * e_f_priori, self.denom_floor, clamp_counter, "inv_g_np1"
257            )
258
259            if self.gamma_clip is not None:
260                gamma_np1_1 = float(np.clip(gamma_np1_1, -self.gamma_clip, self.gamma_clip))
261
262            inv_xi_f_lam: float = self._safe_inv(
263                xi_min_f * self.lambda_, self.denom_floor, clamp_counter, "inv_xi_f"
264            )
265            xi_min_f = max(
266                self._safe_inv(
267                    inv_xi_f_lam - gamma_np1_1 * (phi_hat_np1[0] ** 2),
268                    self.denom_floor,
269                    clamp_counter,
270                    "inv_den_xi_f",
271                ),
272                self.xi_floor,
273            )
274            w_f += phi_hat_n * e_f_post
275
276            e_b_line1: float = float(self.lambda_ * xi_min_b * phi_hat_np1[-1])
277            e_b_line2: float = float(r[-1] - np.dot(w_b, r[:-1]))
278
279            eb3_1: float = float(e_b_line2 * self.kappa1 + e_b_line1 * (1.0 - self.kappa1))
280            eb3_2: float = float(e_b_line2 * self.kappa2 + e_b_line1 * (1.0 - self.kappa2))
281            eb3_3: float = float(e_b_line2 * self.kappa3 + e_b_line1 * (1.0 - self.kappa3))
282
283            inv_g_np1_1: float = self._safe_inv(gamma_np1_1, self.denom_floor, clamp_counter, "inv_g_np1_1")
284            gamma_n_2: float = self._safe_inv(
285                inv_g_np1_1 - phi_hat_np1[-1] * eb3_3, self.denom_floor, clamp_counter, "inv_g_n2"
286            )
287
288            xi_min_b = max(
289                float(self.lambda_ * xi_min_b + (eb3_2 * gamma_n_2) * eb3_2),
290                self.xi_floor,
291            )
292
293            phi_hat_n = phi_hat_np1[:-1] + phi_hat_np1[-1] * w_b
294            w_b += phi_hat_n * (eb3_1 * gamma_n_2)
295
296            gamma_n_3 = self._safe_inv(
297                1.0 + float(np.dot(phi_hat_n, r[:-1])),
298                self.denom_floor,
299                clamp_counter,
300                "inv_g_n3",
301            )
302
303            y_k: float = float(np.dot(self.w, r[:-1]))
304            outputs[k] = y_k
305            e_k: float = float(d[k] - y_k)
306            errors[k] = e_k
307            e_post_k: float = float(e_k * gamma_n_3)
308            errors_post[k] = e_post_k
309
310            self.w += phi_hat_n * e_post_k
311            self._record_history()
312
313            if return_internal_states and xi_f_track is not None:
314                xi_f_track[k], xi_b_track[k], gamma_track[k] = xi_min_f, xi_min_b, gamma_n_3
315
316        runtime_s: float = float(time() - tic)
317        if verbose:
318            print(f"[StabFastRLS] Completed in {runtime_s * 1000:.02f} ms")
319
320        extra: Dict[str, Any] = {"errors_posteriori": errors_post, "clamp_stats": clamp_counter}
321        if return_internal_states:
322            extra.update({"xi_min_f": xi_f_track, "xi_min_b": xi_b_track, "gamma": gamma_track})
323
324        return self._pack_results(
325            outputs=outputs,
326            errors=errors,
327            runtime_s=runtime_s,
328            error_type="a_priori",
329            extra=extra,
330        )

Stabilized Fast Transversal RLS (SFT-RLS) algorithm (real-valued).

The Stabilized Fast Transversal RLS is a numerically robust variant of the Fast Transversal RLS. It preserves the approximately \( O(M) \) per-sample complexity of transversal RLS recursions while improving stability in finite-precision arithmetic by introducing feedback stabilization in the backward prediction recursion (via kappa1, kappa2, kappa3) and by guarding divisions/energies through floors and optional clipping.

This implementation corresponds to Diniz (Alg. 8.2) and is restricted to real-valued input/desired sequences (enforced by ensure_real_signals).

Parameters

filter_order : int
    FIR filter order M. The number of coefficients is M + 1.
forgetting_factor : float, optional
    Exponential forgetting factor lambda. Default is 0.99.
epsilon : float, optional
    Positive initialization for the minimum prediction-error energies (regularization), used as \( \xi_{\min}(0) \) in the recursions. Default is 1e-1.
kappa1, kappa2, kappa3 : float, optional
    Stabilization constants used to form stabilized versions of the backward prediction error. Defaults are 1.5, 2.5, and 1.0.
w_init : array_like of float, optional
    Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.
denom_floor : float, optional
    Safety floor used to clamp denominators before inversion to prevent overflow/underflow and non-finite values during internal recursions. If None, a small value based on machine tiny is used.
xi_floor : float, optional
    Safety floor for prediction error energies (e.g., xi_min_f, xi_min_b). If None, a small value based on machine tiny is used.
gamma_clip : float, optional
    Optional clipping threshold applied to an intermediate conversion factor to avoid extreme values (singularities). If None, no clipping is applied.

Notes

Convention: At time k, the internal regressor window has length M + 2 (denoted r in the code) and is formed in reverse order (most recent sample first). The main adaptive filter uses the first M + 1 entries of this window.

A priori vs a posteriori: The a priori output and error are:

$$y(k) = w^T(k-1) x_k, \qquad e(k) = d(k) - y(k),$$

and the a posteriori error returned by this implementation is:

$$e_{\text{post}}(k) = \gamma(k)\, e(k),$$

where \( \gamma(k) \) is produced by the stabilized transversal recursions.

Stabilization with kappa: The algorithm forms stabilized backward-error combinations (three variants) from two backward-error lines in the recursion (named e_b_line1 and e_b_line2 in the code). Conceptually:

$$e_{b,i}(k) = \kappa_i\, e_{b,2}(k) + (1-\kappa_i)\, e_{b,1}(k),$$

for \( \kappa_i \in \{\kappa_1, \kappa_2, \kappa_3\} \).

Numerical safeguards: Several denominators are clamped to denom_floor before inversion and minimum energies are floored by xi_floor. The counts of clamp events are tracked and returned in extra["clamp_stats"].

References

[1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 8.2.


StabFastRLS( filter_order: int, forgetting_factor: float = 0.99, epsilon: float = 0.1, kappa1: float = 1.5, kappa2: float = 2.5, kappa3: float = 1.0, w_init: Union[numpy.ndarray, list, NoneType] = None, denom_floor: Optional[float] = None, xi_floor: Optional[float] = None, gamma_clip: Optional[float] = None)
122    def __init__(
123        self,
124        filter_order: int,
125        forgetting_factor: float = 0.99,
126        epsilon: float = 1e-1,
127        kappa1: float = 1.5,
128        kappa2: float = 2.5,
129        kappa3: float = 1.0,
130        w_init: Optional[Union[np.ndarray, list]] = None,
131        denom_floor: Optional[float] = None,
132        xi_floor: Optional[float] = None,
133        gamma_clip: Optional[float] = None,
134    ) -> None:
135        super().__init__(filter_order=filter_order, w_init=w_init)
136
137        self.filter_order = int(filter_order)
138        self.n_coeffs = int(self.filter_order + 1)
139        self.lambda_ = float(forgetting_factor)
140        self.epsilon = float(epsilon)
141        self.kappa1 = float(kappa1)
142        self.kappa2 = float(kappa2)
143        self.kappa3 = float(kappa3)
144
145        finfo = np.finfo(np.float64)
146        self.denom_floor = float(denom_floor) if denom_floor is not None else float(finfo.tiny * 1e3)
147        self.xi_floor = float(xi_floor) if xi_floor is not None else float(finfo.tiny * 1e6)
148        self.gamma_clip = float(gamma_clip) if gamma_clip is not None else None
149
150        self.w = np.asarray(self.w, dtype=np.float64)
supports_complex: bool = False
lambda_: float
epsilon: float
kappa1: float
kappa2: float
kappa3: float
denom_floor: float
xi_floor: float
gamma_clip: Optional[float]
n_coeffs: int
filter_order
w
@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
164    @ensure_real_signals
165    @validate_input
166    def optimize(
167        self,
168        input_signal: np.ndarray,
169        desired_signal: np.ndarray,
170        verbose: bool = False,
171        return_internal_states: bool = False,
172    ) -> OptimizationResult:
173        """
174        Executes the stabilized FT-RLS adaptation loop (real-valued).
175
176        Parameters
177        ----------
178        input_signal : array_like of float
179            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
180        desired_signal : array_like of float
181            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
182            Must have the same length as ``input_signal``.
183        verbose : bool, optional
184            If True, prints the total runtime after completion.
185        return_internal_states : bool, optional
186            If True, includes internal trajectories in ``result.extra``:
187            - ``"xi_min_f"``: ndarray of float, shape ``(N,)`` (forward minimum
188              prediction-error energy).
189            - ``"xi_min_b"``: ndarray of float, shape ``(N,)`` (backward minimum
190              prediction-error energy).
191            - ``"gamma"``: ndarray of float, shape ``(N,)`` (conversion factor).
192
193        Returns
194        -------
195        OptimizationResult
196            Result object with fields:
197            - outputs : ndarray of float, shape ``(N,)``
198                A priori output sequence ``y[k]``.
199            - errors : ndarray of float, shape ``(N,)``
200                A priori error sequence ``e[k] = d[k] - y[k]``.
201            - coefficients : ndarray of float
202                Coefficient history recorded by the base class.
203            - error_type : str
204                Set to ``"a_priori"``.
205            - extra : dict
206                Always includes:
207                - ``"errors_posteriori"``: ndarray of float, shape ``(N,)`` with
208                  :math:`e_{\\text{post}}(k)`.
209                - ``"clamp_stats"``: dict with counters of denominator clamps.
210                Additionally includes ``"xi_min_f"``, ``"xi_min_b"``, and
211                ``"gamma"`` if ``return_internal_states=True``.
212        """
213        tic: float = time()
214
215        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
216        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
217
218        n_samples: int = int(x.size)
219        n_taps: int = int(self.filter_order + 1)
220        reg_len: int = int(self.filter_order + 2)
221
222        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
223        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
224        errors_post: np.ndarray = np.zeros(n_samples, dtype=np.float64)
225
226        xi_min_f: float = float(self.epsilon)
227        xi_min_b: float = float(self.epsilon)
228        gamma_n_3: float = 1.0
229
230        xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
231        xi_b_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
232        gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
233
234        w_f: np.ndarray = np.zeros(n_taps, dtype=np.float64)
235        w_b: np.ndarray = np.zeros(n_taps, dtype=np.float64)
236        phi_hat_n: np.ndarray = np.zeros(n_taps, dtype=np.float64)
237        phi_hat_np1: np.ndarray = np.zeros(reg_len, dtype=np.float64)
238
239        x_padded: np.ndarray = np.zeros(n_samples + n_taps, dtype=np.float64)
240        x_padded[n_taps:] = x
241
242        clamp_counter: Dict[str, int] = {}
243
244        for k in range(n_samples):
245            r: np.ndarray = x_padded[k : k + reg_len][::-1]
246
247            e_f_priori: float = float(r[0] - np.dot(w_f, r[1:]))
248            e_f_post: float = float(e_f_priori * gamma_n_3)
249
250            scale: float = self._safe_inv(self.lambda_ * xi_min_f, self.denom_floor, clamp_counter, "inv_lam_xi_f")
251            phi_hat_np1[0] = scale * e_f_priori
252            phi_hat_np1[1:] = phi_hat_n - phi_hat_np1[0] * w_f
253
254            inv_g3: float = self._safe_inv(gamma_n_3, self.denom_floor, clamp_counter, "inv_g3")
255            gamma_np1_1: float = self._safe_inv(
256                inv_g3 + phi_hat_np1[0] * e_f_priori, self.denom_floor, clamp_counter, "inv_g_np1"
257            )
258
259            if self.gamma_clip is not None:
260                gamma_np1_1 = float(np.clip(gamma_np1_1, -self.gamma_clip, self.gamma_clip))
261
262            inv_xi_f_lam: float = self._safe_inv(
263                xi_min_f * self.lambda_, self.denom_floor, clamp_counter, "inv_xi_f"
264            )
265            xi_min_f = max(
266                self._safe_inv(
267                    inv_xi_f_lam - gamma_np1_1 * (phi_hat_np1[0] ** 2),
268                    self.denom_floor,
269                    clamp_counter,
270                    "inv_den_xi_f",
271                ),
272                self.xi_floor,
273            )
274            w_f += phi_hat_n * e_f_post
275
276            e_b_line1: float = float(self.lambda_ * xi_min_b * phi_hat_np1[-1])
277            e_b_line2: float = float(r[-1] - np.dot(w_b, r[:-1]))
278
279            eb3_1: float = float(e_b_line2 * self.kappa1 + e_b_line1 * (1.0 - self.kappa1))
280            eb3_2: float = float(e_b_line2 * self.kappa2 + e_b_line1 * (1.0 - self.kappa2))
281            eb3_3: float = float(e_b_line2 * self.kappa3 + e_b_line1 * (1.0 - self.kappa3))
282
283            inv_g_np1_1: float = self._safe_inv(gamma_np1_1, self.denom_floor, clamp_counter, "inv_g_np1_1")
284            gamma_n_2: float = self._safe_inv(
285                inv_g_np1_1 - phi_hat_np1[-1] * eb3_3, self.denom_floor, clamp_counter, "inv_g_n2"
286            )
287
288            xi_min_b = max(
289                float(self.lambda_ * xi_min_b + (eb3_2 * gamma_n_2) * eb3_2),
290                self.xi_floor,
291            )
292
293            phi_hat_n = phi_hat_np1[:-1] + phi_hat_np1[-1] * w_b
294            w_b += phi_hat_n * (eb3_1 * gamma_n_2)
295
296            gamma_n_3 = self._safe_inv(
297                1.0 + float(np.dot(phi_hat_n, r[:-1])),
298                self.denom_floor,
299                clamp_counter,
300                "inv_g_n3",
301            )
302
303            y_k: float = float(np.dot(self.w, r[:-1]))
304            outputs[k] = y_k
305            e_k: float = float(d[k] - y_k)
306            errors[k] = e_k
307            e_post_k: float = float(e_k * gamma_n_3)
308            errors_post[k] = e_post_k
309
310            self.w += phi_hat_n * e_post_k
311            self._record_history()
312
313            if return_internal_states and xi_f_track is not None:
314                xi_f_track[k], xi_b_track[k], gamma_track[k] = xi_min_f, xi_min_b, gamma_n_3
315
316        runtime_s: float = float(time() - tic)
317        if verbose:
318            print(f"[StabFastRLS] Completed in {runtime_s * 1000:.02f} ms")
319
320        extra: Dict[str, Any] = {"errors_posteriori": errors_post, "clamp_stats": clamp_counter}
321        if return_internal_states:
322            extra.update({"xi_min_f": xi_f_track, "xi_min_b": xi_b_track, "gamma": gamma_track})
323
324        return self._pack_results(
325            outputs=outputs,
326            errors=errors,
327            runtime_s=runtime_s,
328            error_type="a_priori",
329            extra=extra,
330        )

Executes the stabilized FT-RLS adaptation loop (real-valued).

Parameters

input_signal : array_like of float
    Real-valued input sequence x[k] with shape (N,).
desired_signal : array_like of float
    Real-valued desired/reference sequence d[k] with shape (N,). Must have the same length as input_signal.
verbose : bool, optional
    If True, prints the total runtime after completion.
return_internal_states : bool, optional
    If True, includes internal trajectories in result.extra:
    - "xi_min_f": ndarray of float, shape (N,) (forward minimum prediction-error energy).
    - "xi_min_b": ndarray of float, shape (N,) (backward minimum prediction-error energy).
    - "gamma": ndarray of float, shape (N,) (conversion factor).

Returns

OptimizationResult
    Result object with fields:
    - outputs : ndarray of float, shape (N,)
        A priori output sequence y[k].
    - errors : ndarray of float, shape (N,)
        A priori error sequence e[k] = d[k] - y[k].
    - coefficients : ndarray of float
        Coefficient history recorded by the base class.
    - error_type : str
        Set to "a_priori".
    - extra : dict
        Always includes:
        - "errors_posteriori": ndarray of float, shape (N,) with \( e_{\text{post}}(k) \).
        - "clamp_stats": dict with counters of denominator clamps.
        Additionally includes "xi_min_f", "xi_min_b", and "gamma" if return_internal_states=True.

class QRRLS(pydaptivefiltering.AdaptiveFilter):
 35class QRRLS(AdaptiveFilter):
 36    """
 37    QR-RLS adaptive filter using Givens rotations (real-valued).
 38
 39    QR-decomposition RLS implementation based on Diniz (Alg. 9.1, 3rd ed.),
 40    following the reference MATLAB routine ``QR_RLS.m``. This variant maintains
 41    internal state variables closely matching the MATLAB code and applies
 42    sequential real Givens rotations to a stacked system.
 43
 44    Parameters
 45    ----------
 46    filter_order : int
 47        Adaptive FIR filter order ``M``. The number of coefficients is ``M+1``.
 48    lamb : float, optional
 49        Forgetting factor ``lambda`` with ``0 < lambda <= 1``. Default is 0.99.
 50    w_init : array_like of float, optional
 51        Initial coefficient vector ``w(0)`` with shape ``(M+1,)``. If None,
 52        initializes with zeros.
 53    denom_floor : float, optional
 54        Small positive floor used to avoid division by (near) zero in scalar
 55        denominators. Default is 1e-18.
 56
 57    Notes
 58    -----
 59    Real-valued only
 60        This implementation is restricted to real-valued signals and coefficients
 61        (``supports_complex=False``). The constraint is enforced via
 62        ``@ensure_real_signals`` on :meth:`optimize`.
 63
 64    State variables (MATLAB naming)
 65        This implementation keeps the same key state variables as ``QR_RLS.m``:
 66
 67        - ``ULineMatrix`` : ndarray, shape ``(M+1, M+1)``
 68          Upper-triangular-like matrix updated by sequential Givens rotations.
 69        - ``dLine_q2`` : ndarray, shape ``(M+1,)``
 70          Transformed desired vector accumulated through the same rotations.
 71        - ``gamma`` : float
 72          Scalar accumulated as the product of Givens cosines in each iteration.
 73
 74    Givens-rotation structure (high level)
 75        At each iteration, the algorithm applies Givens rotations to eliminate
 76        components of the stacked vector ``[regressor; ULineMatrix]`` while
 77        applying the same rotations to ``[d_line; dLine_q2]``. The resulting
 78        system is then solved by back-substitution to obtain the updated weights.
 79
 80    Output/error conventions (MATLAB-style)
 81        The returned ``errors`` correspond to the MATLAB ``errorVector``:
 82
 83        .. math::
 84            e[k] = d_{line}[k] \\cdot \\gamma[k],
 85
 86        and the reported output is computed as:
 87
 88        .. math::
 89            y[k] = d[k] - e[k].
 90
 91        Since this error is formed after the rotation steps (i.e., after the
 92        QR-update stage), the method sets ``error_type="a_posteriori"``.
 93
 94    References
 95    ----------
 96    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 97       Implementation*, 3rd ed., Algorithm 9.1 (QR-RLS).
 98    """
 99    supports_complex: bool = False
100
101    lamb: float
102    n_coeffs: int
103    ULineMatrix: np.ndarray
104    dLine_q2: np.ndarray
105    _tiny: float
106
107    def __init__(
108        self,
109        filter_order: int,
110        lamb: float = 0.99,
111        w_init: Optional[ArrayLike] = None,
112        *,
113        denom_floor: float = 1e-18,
114    ) -> None:
115        super().__init__(filter_order=int(filter_order), w_init=w_init)
116
117        self.lamb = float(lamb)
118        if not (0.0 < self.lamb <= 1.0):
119            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got {self.lamb}.")
120
121        self._tiny = float(denom_floor)
122
123        self.n_coeffs = int(self.filter_order) + 1
124
125        self.w = np.asarray(self.w, dtype=np.float64)
126
127        if w_init is not None:
128            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
129            if w0.size != self.n_coeffs:
130                raise ValueError(
131                    f"w_init must have length {self.n_coeffs}, got {w0.size}."
132                )
133            self.w = w0.copy()
134
135        self.ULineMatrix = np.zeros((self.n_coeffs, self.n_coeffs), dtype=np.float64)
136        self.dLine_q2 = np.zeros(self.n_coeffs, dtype=np.float64)
137
138        self.w_history = []
139        self._record_history()
140
141    @staticmethod
142    def _givens_rotate_rows(
143        row0: np.ndarray,
144        row1: np.ndarray,
145        cos_t: float,
146        sin_t: float,
147    ) -> tuple[np.ndarray, np.ndarray]:
148        """
149        Applies a real 2x2 Givens rotation to a pair of stacked rows.
150
151        The rotation is:
152
153        .. math::
154            \\begin{bmatrix}
155                \\cos\\theta & -\\sin\\theta \\\\
156                \\sin\\theta &  \\cos\\theta
157            \\end{bmatrix}
158            \\begin{bmatrix}
159                \\mathrm{row0} \\\\
160                \\mathrm{row1}
161            \\end{bmatrix}
162            =
163            \\begin{bmatrix}
164                \\mathrm{row0}' \\\\
165                \\mathrm{row1}'
166            \\end{bmatrix}.
167
168        Parameters
169        ----------
170        row0, row1 : ndarray of float
171            1-D arrays with the same length (representing two rows to be rotated).
172        cos_t, sin_t : float
173            Givens rotation cosine and sine.
174
175        Returns
176        -------
177        (row0_rot, row1_rot) : tuple of ndarray
178            Rotated rows.
179        """
180        new0 = cos_t * row0 - sin_t * row1
181        new1 = sin_t * row0 + cos_t * row1
182        return new0, new1
183
184    @ensure_real_signals
185    @validate_input
186    def optimize(
187        self,
188        input_signal: np.ndarray,
189        desired_signal: np.ndarray,
190        verbose: bool = False,
191        return_internal_states: bool = False,
192    ) -> OptimizationResult:
193        """
194        Executes the QR-RLS adaptation loop (MATLAB-style recursion).
195
196        Parameters
197        ----------
198        input_signal : array_like of float
199            Real-valued input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
200        desired_signal : array_like of float
201            Real-valued desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
202        verbose : bool, optional
203            If True, prints the total runtime after completion.
204        return_internal_states : bool, optional
205            If True, includes the last internal states in ``result.extra``:
206            ``"ULineMatrix_last"``, ``"dLine_q2_last"``, ``"gamma_last"``,
207            and ``"d_line_last"``.
208
209        Returns
210        -------
211        OptimizationResult
212            Result object with fields:
213            - outputs : ndarray of float, shape ``(N,)``
214                Scalar output sequence as computed by the MATLAB-style routine:
215                ``y[k] = d[k] - e[k]``.
216            - errors : ndarray of float, shape ``(N,)``
217                MATLAB-style a posteriori error quantity:
218                ``e[k] = d_line[k] * gamma[k]``.
219            - coefficients : ndarray of float
220                Coefficient history recorded by the base class.
221            - error_type : str
222                Set to ``"a_posteriori"``.
223            - extra : dict, optional
224                Present only if ``return_internal_states=True`` with:
225                - ``ULineMatrix_last`` : ndarray
226                    Final ``ULineMatrix``.
227                - ``dLine_q2_last`` : ndarray
228                    Final ``dLine_q2``.
229                - ``gamma_last`` : float
230                    ``gamma`` at the last iteration.
231                - ``d_line_last`` : float
232                    ``d_line`` at the last iteration.
233                - ``forgetting_factor`` : float
234                    The forgetting factor ``lambda`` used.
235        """
236        t0 = perf_counter()
237
238        x = np.asarray(input_signal, dtype=np.float64).ravel()
239        d = np.asarray(desired_signal, dtype=np.float64).ravel()
240
241        n_samples = int(d.size)
242        n = int(self.n_coeffs)
243        M = int(self.filter_order)
244
245        if n_samples < n:
246            raise ValueError(
247                f"QR-RLS needs at least (filter_order+1) samples. "
248                f"Got n_samples={n_samples}, filter_order={M} => n_coeffs={n}."
249            )
250
251        outputs = np.zeros(n_samples, dtype=np.float64)
252        errors = np.zeros(n_samples, dtype=np.float64)
253
254        self.ULineMatrix.fill(0.0)
255        self.dLine_q2.fill(0.0)
256
257        self.w_history = []
258        self._record_history()
259
260        denom0 = float(x[0])
261        if abs(denom0) < self._tiny:
262            denom0 = self._tiny if denom0 >= 0.0 else -self._tiny
263
264        for kt in range(n):
265            w_tmp = np.zeros(n, dtype=np.float64)
266            w_tmp[0] = float(d[0] / denom0)
267
268            for ct in range(1, kt + 1):
269                num = -float(np.dot(x[1 : ct + 1], w_tmp[ct - 1 :: -1])) + float(d[ct])
270                w_tmp[ct] = float(num / denom0)
271
272            self.w = w_tmp
273            self._record_history()
274
275            xk = np.zeros(n, dtype=np.float64)
276            start = max(0, kt - M)
277            seg = x[start : kt + 1][::-1]
278            xk[: seg.size] = seg
279            outputs[kt] = float(np.dot(w_tmp, xk))
280
281        sqrt_lam = float(np.sqrt(self.lamb))
282
283        for it in range(M + 1):
284            scale = float(self.lamb ** ((it + 1) / 2.0))
285
286            vec = x[(n - it - 1) :: -1]
287            self.ULineMatrix[it, 0 : (n - it)] = scale * vec
288
289            self.dLine_q2[it] = scale * float(d[n - it - 1])
290
291        gamma_last: float = 1.0
292        d_line_last: float = float(d[n - 1])
293
294        for k in range(n, n_samples):
295            gamma = 1.0
296            d_line = float(d[k])
297
298            reg = x[k : k - M - 1 : -1].copy()
299
300            for rt in range(M + 1):
301                row_u = rt
302                col_u = n - 1 - rt
303                idx_r = n - 1 - rt
304
305                u_val = float(self.ULineMatrix[row_u, col_u])
306                r_val = float(reg[idx_r])
307
308                cI = float(np.sqrt(u_val * u_val + r_val * r_val))
309                if cI < self._tiny:
310                    cos_t, sin_t = 1.0, 0.0
311                else:
312                    cos_t, sin_t = (u_val / cI), (r_val / cI)
313
314                reg, self.ULineMatrix[row_u, :] = self._givens_rotate_rows(
315                    reg, self.ULineMatrix[row_u, :], cos_t, sin_t
316                )
317
318                gamma *= cos_t
319
320                dq2_rt = float(self.dLine_q2[row_u])
321                new_d_line = (cos_t * d_line) - (sin_t * dq2_rt)
322                new_dq2_rt = (sin_t * d_line) + (cos_t * dq2_rt)
323                d_line = float(new_d_line)
324                self.dLine_q2[row_u] = float(new_dq2_rt)
325
326            d_bar = np.empty(n + 1, dtype=np.float64)
327            d_bar[0] = d_line
328            d_bar[1:] = self.dLine_q2
329
330            w_new = np.zeros(n, dtype=np.float64)
331
332            den = float(self.ULineMatrix[n - 1, 0])
333            if abs(den) < self._tiny:
334                den = self._tiny if den >= 0.0 else -self._tiny
335            w_new[0] = float(d_bar[n] / den)
336
337            for it in range(1, M + 1):
338                row = n - 1 - it
339                u_vec = self.ULineMatrix[row, 0:it][::-1]
340                w_vec = w_new[0:it][::-1]
341                num = -float(np.dot(u_vec, w_vec)) + float(d_bar[n - it])
342
343                den = float(self.ULineMatrix[row, it])
344                if abs(den) < self._tiny:
345                    den = self._tiny if den >= 0.0 else -self._tiny
346
347                w_new[it] = float(num / den)
348
349            self.w = w_new
350            self._record_history()
351
352            self.dLine_q2 *= sqrt_lam
353            self.ULineMatrix *= sqrt_lam
354
355            errors[k] = float(d_line * gamma)
356            outputs[k] = float(d[k] - errors[k])
357
358            gamma_last = float(gamma)
359            d_line_last = float(d_line)
360
361        runtime_s = float(perf_counter() - t0)
362        if verbose:
363            print(f"[QRRLS] Completed in {runtime_s * 1000:.03f} ms")
364
365        extra: Optional[Dict[str, Any]] = None
366        if return_internal_states:
367            extra = {
368                "ULineMatrix_last": self.ULineMatrix.copy(),
369                "dLine_q2_last": self.dLine_q2.copy(),
370                "gamma_last": gamma_last,
371                "d_line_last": d_line_last,
372                "forgetting_factor": float(self.lamb),
373            }
374
375        return self._pack_results(
376            outputs=outputs,
377            errors=errors,
378            runtime_s=runtime_s,
379            error_type="a_posteriori",
380            extra=extra,
381        )

QR-RLS adaptive filter using Givens rotations (real-valued).

QR-decomposition RLS implementation based on Diniz (Alg. 9.1, 3rd ed.), following the reference MATLAB routine QR_RLS.m. This variant maintains internal state variables closely matching the MATLAB code and applies sequential real Givens rotations to a stacked system.

Parameters

filter_order : int
    Adaptive FIR filter order M. The number of coefficients is M+1.
lamb : float, optional
    Forgetting factor lambda with 0 < lambda <= 1. Default is 0.99.
w_init : array_like of float, optional
    Initial coefficient vector w(0) with shape (M+1,). If None, initializes with zeros.
denom_floor : float, optional
    Small positive floor used to avoid division by (near) zero in scalar denominators. Default is 1e-18.

Notes

Real-valued only: this implementation is restricted to real-valued signals and coefficients (supports_complex=False). The constraint is enforced via @ensure_real_signals on optimize().

State variables (MATLAB naming): this implementation keeps the same key state variables as QR_RLS.m:

- ``ULineMatrix`` : ndarray, shape ``(M+1, M+1)``
  Upper-triangular-like matrix updated by sequential Givens rotations.
- ``dLine_q2`` : ndarray, shape ``(M+1,)``
  Transformed desired vector accumulated through the same rotations.
- ``gamma`` : float
  Scalar accumulated as the product of Givens cosines in each iteration.

Givens-rotation structure (high level): at each iteration, the algorithm applies Givens rotations to eliminate components of the stacked vector [regressor; ULineMatrix] while applying the same rotations to [d_line; dLine_q2]. The resulting system is then solved by back-substitution to obtain the updated weights.

Output/error conventions (MATLAB-style): the returned errors correspond to the MATLAB errorVector:

$$e[k] = d_{line}[k] \cdot \gamma[k],$$

and the reported output is computed as:

$$y[k] = d[k] - e[k].$$

Since this error is formed after the rotation steps (i.e., after the
QR-update stage), the method sets ``error_type="a_posteriori"``.

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 3rd ed., Algorithm 9.1 (QR-RLS).

QRRLS( filter_order: int, lamb: float = 0.99, w_init: Union[numpy.ndarray, list, NoneType] = None, *, denom_floor: float = 1e-18)
107    def __init__(
108        self,
109        filter_order: int,
110        lamb: float = 0.99,
111        w_init: Optional[ArrayLike] = None,
112        *,
113        denom_floor: float = 1e-18,
114    ) -> None:
115        super().__init__(filter_order=int(filter_order), w_init=w_init)
116
117        self.lamb = float(lamb)
118        if not (0.0 < self.lamb <= 1.0):
119            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got {self.lamb}.")
120
121        self._tiny = float(denom_floor)
122
123        self.n_coeffs = int(self.filter_order) + 1
124
125        self.w = np.asarray(self.w, dtype=np.float64)
126
127        if w_init is not None:
128            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
129            if w0.size != self.n_coeffs:
130                raise ValueError(
131                    f"w_init must have length {self.n_coeffs}, got {w0.size}."
132                )
133            self.w = w0.copy()
134
135        self.ULineMatrix = np.zeros((self.n_coeffs, self.n_coeffs), dtype=np.float64)
136        self.dLine_q2 = np.zeros(self.n_coeffs, dtype=np.float64)
137
138        self.w_history = []
139        self._record_history()
supports_complex: bool = False
lamb: float
n_coeffs: int
ULineMatrix: numpy.ndarray
dLine_q2: numpy.ndarray
w
w_history
@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
184    @ensure_real_signals
185    @validate_input
186    def optimize(
187        self,
188        input_signal: np.ndarray,
189        desired_signal: np.ndarray,
190        verbose: bool = False,
191        return_internal_states: bool = False,
192    ) -> OptimizationResult:
193        """
194        Executes the QR-RLS adaptation loop (MATLAB-style recursion).
195
196        Parameters
197        ----------
198        input_signal : array_like of float
199            Real-valued input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
200        desired_signal : array_like of float
201            Real-valued desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
202        verbose : bool, optional
203            If True, prints the total runtime after completion.
204        return_internal_states : bool, optional
205            If True, includes the last internal states in ``result.extra``:
206            ``"ULineMatrix_last"``, ``"dLine_q2_last"``, ``"gamma_last"``,
207            and ``"d_line_last"``.
208
209        Returns
210        -------
211        OptimizationResult
212            Result object with fields:
213            - outputs : ndarray of float, shape ``(N,)``
214                Scalar output sequence as computed by the MATLAB-style routine:
215                ``y[k] = d[k] - e[k]``.
216            - errors : ndarray of float, shape ``(N,)``
217                MATLAB-style a posteriori error quantity:
218                ``e[k] = d_line[k] * gamma[k]``.
219            - coefficients : ndarray of float
220                Coefficient history recorded by the base class.
221            - error_type : str
222                Set to ``"a_posteriori"``.
223            - extra : dict, optional
224                Present only if ``return_internal_states=True`` with:
225                - ``ULineMatrix_last`` : ndarray
226                    Final ``ULineMatrix``.
227                - ``dLine_q2_last`` : ndarray
228                    Final ``dLine_q2``.
229                - ``gamma_last`` : float
230                    ``gamma`` at the last iteration.
231                - ``d_line_last`` : float
232                    ``d_line`` at the last iteration.
233                - ``forgetting_factor`` : float
234                    The forgetting factor ``lambda`` used.
235        """
236        t0 = perf_counter()
237
238        x = np.asarray(input_signal, dtype=np.float64).ravel()
239        d = np.asarray(desired_signal, dtype=np.float64).ravel()
240
241        n_samples = int(d.size)
242        n = int(self.n_coeffs)
243        M = int(self.filter_order)
244
245        if n_samples < n:
246            raise ValueError(
247                f"QR-RLS needs at least (filter_order+1) samples. "
248                f"Got n_samples={n_samples}, filter_order={M} => n_coeffs={n}."
249            )
250
251        outputs = np.zeros(n_samples, dtype=np.float64)
252        errors = np.zeros(n_samples, dtype=np.float64)
253
254        self.ULineMatrix.fill(0.0)
255        self.dLine_q2.fill(0.0)
256
257        self.w_history = []
258        self._record_history()
259
260        denom0 = float(x[0])
261        if abs(denom0) < self._tiny:
262            denom0 = self._tiny if denom0 >= 0.0 else -self._tiny
263
264        for kt in range(n):
265            w_tmp = np.zeros(n, dtype=np.float64)
266            w_tmp[0] = float(d[0] / denom0)
267
268            for ct in range(1, kt + 1):
269                num = -float(np.dot(x[1 : ct + 1], w_tmp[ct - 1 :: -1])) + float(d[ct])
270                w_tmp[ct] = float(num / denom0)
271
272            self.w = w_tmp
273            self._record_history()
274
275            xk = np.zeros(n, dtype=np.float64)
276            start = max(0, kt - M)
277            seg = x[start : kt + 1][::-1]
278            xk[: seg.size] = seg
279            outputs[kt] = float(np.dot(w_tmp, xk))
280
281        sqrt_lam = float(np.sqrt(self.lamb))
282
283        for it in range(M + 1):
284            scale = float(self.lamb ** ((it + 1) / 2.0))
285
286            vec = x[(n - it - 1) :: -1]
287            self.ULineMatrix[it, 0 : (n - it)] = scale * vec
288
289            self.dLine_q2[it] = scale * float(d[n - it - 1])
290
291        gamma_last: float = 1.0
292        d_line_last: float = float(d[n - 1])
293
294        for k in range(n, n_samples):
295            gamma = 1.0
296            d_line = float(d[k])
297
298            reg = x[k : k - M - 1 : -1].copy()
299
300            for rt in range(M + 1):
301                row_u = rt
302                col_u = n - 1 - rt
303                idx_r = n - 1 - rt
304
305                u_val = float(self.ULineMatrix[row_u, col_u])
306                r_val = float(reg[idx_r])
307
308                cI = float(np.sqrt(u_val * u_val + r_val * r_val))
309                if cI < self._tiny:
310                    cos_t, sin_t = 1.0, 0.0
311                else:
312                    cos_t, sin_t = (u_val / cI), (r_val / cI)
313
314                reg, self.ULineMatrix[row_u, :] = self._givens_rotate_rows(
315                    reg, self.ULineMatrix[row_u, :], cos_t, sin_t
316                )
317
318                gamma *= cos_t
319
320                dq2_rt = float(self.dLine_q2[row_u])
321                new_d_line = (cos_t * d_line) - (sin_t * dq2_rt)
322                new_dq2_rt = (sin_t * d_line) + (cos_t * dq2_rt)
323                d_line = float(new_d_line)
324                self.dLine_q2[row_u] = float(new_dq2_rt)
325
326            d_bar = np.empty(n + 1, dtype=np.float64)
327            d_bar[0] = d_line
328            d_bar[1:] = self.dLine_q2
329
330            w_new = np.zeros(n, dtype=np.float64)
331
332            den = float(self.ULineMatrix[n - 1, 0])
333            if abs(den) < self._tiny:
334                den = self._tiny if den >= 0.0 else -self._tiny
335            w_new[0] = float(d_bar[n] / den)
336
337            for it in range(1, M + 1):
338                row = n - 1 - it
339                u_vec = self.ULineMatrix[row, 0:it][::-1]
340                w_vec = w_new[0:it][::-1]
341                num = -float(np.dot(u_vec, w_vec)) + float(d_bar[n - it])
342
343                den = float(self.ULineMatrix[row, it])
344                if abs(den) < self._tiny:
345                    den = self._tiny if den >= 0.0 else -self._tiny
346
347                w_new[it] = float(num / den)
348
349            self.w = w_new
350            self._record_history()
351
352            self.dLine_q2 *= sqrt_lam
353            self.ULineMatrix *= sqrt_lam
354
355            errors[k] = float(d_line * gamma)
356            outputs[k] = float(d[k] - errors[k])
357
358            gamma_last = float(gamma)
359            d_line_last = float(d_line)
360
361        runtime_s = float(perf_counter() - t0)
362        if verbose:
363            print(f"[QRRLS] Completed in {runtime_s * 1000:.03f} ms")
364
365        extra: Optional[Dict[str, Any]] = None
366        if return_internal_states:
367            extra = {
368                "ULineMatrix_last": self.ULineMatrix.copy(),
369                "dLine_q2_last": self.dLine_q2.copy(),
370                "gamma_last": gamma_last,
371                "d_line_last": d_line_last,
372                "forgetting_factor": float(self.lamb),
373            }
374
375        return self._pack_results(
376            outputs=outputs,
377            errors=errors,
378            runtime_s=runtime_s,
379            error_type="a_posteriori",
380            extra=extra,
381        )

Executes the QR-RLS adaptation loop (MATLAB-style recursion).

Parameters

input_signal : array_like of float
    Real-valued input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float
    Real-valued desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
    If True, prints the total runtime after completion.
return_internal_states : bool, optional
    If True, includes the last internal states in result.extra:
    "ULineMatrix_last", "dLine_q2_last", "gamma_last", and "d_line_last".

Returns

OptimizationResult
    Result object with fields:

    - outputs : ndarray of float, shape (N,) — scalar output sequence as computed by the MATLAB-style routine: y[k] = d[k] - e[k].
    - errors : ndarray of float, shape (N,) — MATLAB-style a posteriori error quantity: e[k] = d_line[k] * gamma[k].
    - coefficients : ndarray of float — coefficient history recorded by the base class.
    - error_type : str — set to "a_posteriori".
    - extra : dict, optional — present only if return_internal_states=True, with:

      - "ULineMatrix_last" : ndarray — final ULineMatrix.
      - "dLine_q2_last" : ndarray — final dLine_q2.
      - "gamma_last" : float — gamma at the last iteration.
      - "d_line_last" : float — d_line at the last iteration.
      - "forgetting_factor" : float — the forgetting factor lambda used.

class ErrorEquation(pydaptivefiltering.AdaptiveFilter):
 26class ErrorEquation(AdaptiveFilter):
 27    """
 28    Equation-Error RLS for adaptive IIR filtering (real-valued).
 29
 30    The equation-error approach avoids the non-convexity of direct IIR
 31    output-error minimization by adapting the coefficients using an auxiliary
 32    (linear-in-parameters) error in which past outputs in the feedback path are
 33    replaced by past desired samples. This yields a quadratic (RLS-suitable)
 34    criterion while still producing a "true IIR" output for evaluation.
 35
 36    This implementation follows Diniz (3rd ed., Alg. 10.3) and is restricted to
 37    **real-valued** signals (enforced by ``ensure_real_signals``).
 38
 39    Parameters
 40    ----------
 41    zeros_order : int
 42        Numerator order ``N`` (number of zeros). The feedforward part has
 43        ``N + 1`` coefficients.
 44    poles_order : int
 45        Denominator order ``M`` (number of poles). The feedback part has ``M``
 46        coefficients.
 47    forgetting_factor : float, optional
 48        Exponential forgetting factor ``lambda``. Default is 0.99.
 49    epsilon : float, optional
 50        Positive initialization for the inverse correlation matrix used by RLS.
 51        Internally, the inverse covariance is initialized as:
 52
 53        .. math::
 54            S(0) = \\frac{1}{\\epsilon} I.
 55
 56        Default is 1e-3.
 57    w_init : array_like of float, optional
 58        Optional initial coefficient vector. If provided, it should have shape
 59        ``(M + N + 1,)`` following the parameter order described below. If None,
 60        the implementation initializes with zeros (and ignores ``w_init``).
 61
 62    Notes
 63    -----
 64    Parameterization (as implemented)
 65    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 66    The coefficient vector is arranged as:
 67
 68    - ``w[:M]``: feedback (pole) coefficients (denoted ``a`` in literature)
 69    - ``w[M:]``: feedforward (zero) coefficients (denoted ``b``)
 70
 71    Regressors and two outputs
 72    ~~~~~~~~~~~~~~~~~~~~~~~~~~
 73    At time ``k``, define ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T``.
 74    The algorithm forms two regressors:
 75
 76    - Output regressor (uses past *true outputs*):
 77
 78      .. math::
 79          \\varphi_y(k) = [y(k-1), \\ldots, y(k-M),\\; x(k), \\ldots, x(k-N)]^T.
 80
 81    - Equation regressor (uses past *desired samples*):
 82
 83      .. math::
 84          \\varphi_e(k) = [d(k-1), \\ldots, d(k-M),\\; x(k), \\ldots, x(k-N)]^T.
 85
 86    The reported output is the "true IIR" output computed with the output
 87    regressor:
 88
 89    .. math::
 90        y(k) = w^T(k)\\, \\varphi_y(k),
 91
 92    while the auxiliary "equation" output is:
 93
 94    .. math::
 95        y_{eq}(k) = w^T(k)\\, \\varphi_e(k).
 96
 97    The adaptation is driven by the *equation error*:
 98
 99    .. math::
100        e_{eq}(k) = d(k) - y_{eq}(k),
101
102    whereas the "output error" used for evaluation is:
103
104    .. math::
105        e(k) = d(k) - y(k).
106
107    Stability procedure
108    ~~~~~~~~~~~~~~~~~~~
109    After each update, the feedback coefficients ``w[:M]`` are stabilized by
110    reflecting any poles outside the unit circle back inside (pole reflection).
111
112    References
113    ----------
114    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
115       Implementation*, 3rd ed., Algorithm 10.3.
116    """
117
118    supports_complex: bool = False
119    zeros_order: int
120    poles_order: int
121    forgetting_factor: float
122    epsilon: float
123    n_coeffs: int
124    Sd: np.ndarray
125    y_buffer: np.ndarray
126    d_buffer: np.ndarray
127
128    def __init__(
129        self,
130        zeros_order: int,
131        poles_order: int,
132        forgetting_factor: float = 0.99,
133        epsilon: float = 1e-3,
134        w_init: Optional[Union[np.ndarray, list]] = None,
135    ) -> None:
136        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
137
138        self.zeros_order = int(zeros_order)
139        self.poles_order = int(poles_order)
140        self.forgetting_factor = float(forgetting_factor)
141        self.epsilon = float(epsilon)
142
143        self.n_coeffs = int(self.poles_order + self.zeros_order + 1)
144        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
145
146        self.Sd = (1.0 / self.epsilon) * np.eye(self.n_coeffs, dtype=np.float64)
147
148        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
149        self.d_buffer = np.zeros(self.poles_order, dtype=np.float64)
150
151    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
152        """
153        Enforces IIR stability by reflecting poles outside the unit circle back inside.
154        This ensures the recursive part of the filter does not diverge.
155        """
156        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
157        poles: np.ndarray = np.roots(poly_coeffs)
158        mask: np.ndarray = np.abs(poles) > 1.0
159
160        if np.any(mask):
161            poles[mask] = 1.0 / np.conj(poles[mask])
162            new_poly: np.ndarray = np.poly(poles)
163            return -np.real(new_poly[1:])
164        return a_coeffs
165
166    @ensure_real_signals
167    @validate_input
168    def optimize(
169        self,
170        input_signal: np.ndarray,
171        desired_signal: np.ndarray,
172        verbose: bool = False,
173        return_internal_states: bool = False,
174    ) -> OptimizationResult:
175        """
176        Executes the equation-error RLS adaptation loop.
177
178        Parameters
179        ----------
180        input_signal : array_like of float
181            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
182        desired_signal : array_like of float
183            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
184            Must have the same length as ``input_signal``.
185        verbose : bool, optional
186            If True, prints the total runtime after completion.
187        return_internal_states : bool, optional
188            If True, includes the time history of the feedback (pole)
189            coefficients in ``result.extra["a_coefficients"]`` with shape
190            ``(N, poles_order)`` (or None if ``poles_order == 0``).
191
192        Returns
193        -------
194        OptimizationResult
195            Result object with fields:
196            - outputs : ndarray of float, shape ``(N,)``
197                "True IIR" output sequence ``y[k]`` computed with past outputs.
198            - errors : ndarray of float, shape ``(N,)``
199                Output error sequence ``e[k] = d[k] - y[k]``.
200            - coefficients : ndarray of float
201                Coefficient history recorded by the base class.
202            - error_type : str
203                Set to ``"equation_error"``.
204            - extra : dict
205                Always includes:
206                - ``"auxiliary_errors"``: ndarray of float, shape ``(N,)`` with
207                  the equation error ``e_eq[k] = d[k] - y_eq[k]`` used to drive
208                  the RLS update.
209                Additionally includes ``"a_coefficients"`` if
210                ``return_internal_states=True``.
211        """
212        tic: float = time()
213
214        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
215        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
216        n_samples: int = int(x.size)
217
218        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
219        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
220        errors_aux: np.ndarray = np.zeros(n_samples, dtype=np.float64)
221
222        a_track: Optional[np.ndarray] = (
223            np.zeros((n_samples, self.poles_order), dtype=np.float64)
224            if (return_internal_states and self.poles_order > 0)
225            else None
226        )
227
228        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
229        x_padded[self.zeros_order:] = x
230
231        
232
233        for k in range(n_samples):
234            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
235            
236            reg_y: np.ndarray = np.concatenate((self.y_buffer, reg_x))
237            
238            reg_e: np.ndarray = np.concatenate((self.d_buffer, reg_x))
239
240            y_out: float = float(np.dot(self.w, reg_y))
241            y_equation: float = float(np.dot(self.w, reg_e))
242
243            outputs[k] = y_out
244            errors[k] = float(d[k] - y_out)
245            errors_aux[k] = float(d[k] - y_equation)
246
247            psi: np.ndarray = self.Sd @ reg_e
248            den: float = float(self.forgetting_factor + reg_e.T @ psi)
249
250            self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)
251            self.w += (self.Sd @ reg_e) * errors_aux[k]
252
253            if self.poles_order > 0:
254                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
255
256                if return_internal_states and a_track is not None:
257                    a_track[k, :] = self.w[: self.poles_order]
258
259                self.y_buffer = np.concatenate(([y_out], self.y_buffer[:-1]))
260                self.d_buffer = np.concatenate(([d[k]], self.d_buffer[:-1]))
261
262            self._record_history()
263
264        runtime_s: float = float(time() - tic)
265        if verbose:
266            print(f"[ErrorEquation] Completed in {runtime_s * 1000:.02f} ms")
267
268        extra: Dict[str, Any] = {"auxiliary_errors": errors_aux}
269        if return_internal_states:
270            extra["a_coefficients"] = a_track
271
272        return self._pack_results(
273            outputs=outputs,
274            errors=errors,
275            runtime_s=runtime_s,
276            error_type="equation_error",
277            extra=extra,
278        )

Equation-Error RLS for adaptive IIR filtering (real-valued).

The equation-error approach avoids the non-convexity of direct IIR output-error minimization by adapting the coefficients using an auxiliary (linear-in-parameters) error in which past outputs in the feedback path are replaced by past desired samples. This yields a quadratic (RLS-suitable) criterion while still producing a "true IIR" output for evaluation.

This implementation follows Diniz (3rd ed., Alg. 10.3) and is restricted to real-valued signals (enforced by ensure_real_signals).

Parameters

zeros_order : int
    Numerator order N (number of zeros). The feedforward part has N + 1 coefficients.
poles_order : int
    Denominator order M (number of poles). The feedback part has M coefficients.
forgetting_factor : float, optional
    Exponential forgetting factor lambda. Default is 0.99.
epsilon : float, optional
    Positive initialization for the inverse correlation matrix used by RLS. Internally, the inverse covariance is initialized as:

$$S(0) = \frac{1}{\epsilon} I.$$

Default is 1e-3.

w_init : array_like of float, optional
    Optional initial coefficient vector. If provided, it should have shape (M + N + 1,) following the parameter order described below. If None, the implementation initializes with zeros (and ignores w_init).

Notes

Parameterization (as implemented): the coefficient vector is arranged as:

  • w[:M]: feedback (pole) coefficients (denoted a in literature)
  • w[M:]: feedforward (zero) coefficients (denoted b)

Regressors and two outputs: at time k, define reg_x = [x(k), x(k-1), ..., x(k-N)]^T. The algorithm forms two regressors:

  • Output regressor (uses past true outputs):

    $$\varphi_y(k) = [y(k-1), \ldots, y(k-M),\; x(k), \ldots, x(k-N)]^T.$$

  • Equation regressor (uses past desired samples):

    $$\varphi_e(k) = [d(k-1), \ldots, d(k-M),\; x(k), \ldots, x(k-N)]^T.$$

The reported output is the "true IIR" output computed with the output regressor:

$$y(k) = w^T(k)\, \varphi_y(k),$$

while the auxiliary "equation" output is:

$$y_{eq}(k) = w^T(k)\, \varphi_e(k).$$

The adaptation is driven by the equation error:

$$e_{eq}(k) = d(k) - y_{eq}(k),$$

whereas the "output error" used for evaluation is:

$$e(k) = d(k) - y(k).$$

Stability procedure: after each update, the feedback coefficients w[:M] are stabilized by reflecting any poles outside the unit circle back inside (pole reflection).

References

[1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 3rd ed., Algorithm 10.3.

ErrorEquation( zeros_order: int, poles_order: int, forgetting_factor: float = 0.99, epsilon: float = 0.001, w_init: Union[numpy.ndarray, list, NoneType] = None)
128    def __init__(
129        self,
130        zeros_order: int,
131        poles_order: int,
132        forgetting_factor: float = 0.99,
133        epsilon: float = 1e-3,
134        w_init: Optional[Union[np.ndarray, list]] = None,
135    ) -> None:
136        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
137
138        self.zeros_order = int(zeros_order)
139        self.poles_order = int(poles_order)
140        self.forgetting_factor = float(forgetting_factor)
141        self.epsilon = float(epsilon)
142
143        self.n_coeffs = int(self.poles_order + self.zeros_order + 1)
144        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
145
146        self.Sd = (1.0 / self.epsilon) * np.eye(self.n_coeffs, dtype=np.float64)
147
148        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
149        self.d_buffer = np.zeros(self.poles_order, dtype=np.float64)
supports_complex: bool = False
zeros_order: int
poles_order: int
forgetting_factor: float
epsilon: float
n_coeffs: int
Sd: numpy.ndarray
y_buffer: numpy.ndarray
d_buffer: numpy.ndarray
w
@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
166    @ensure_real_signals
167    @validate_input
168    def optimize(
169        self,
170        input_signal: np.ndarray,
171        desired_signal: np.ndarray,
172        verbose: bool = False,
173        return_internal_states: bool = False,
174    ) -> OptimizationResult:
175        """
176        Executes the equation-error RLS adaptation loop.
177
178        Parameters
179        ----------
180        input_signal : array_like of float
181            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
182        desired_signal : array_like of float
183            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
184            Must have the same length as ``input_signal``.
185        verbose : bool, optional
186            If True, prints the total runtime after completion.
187        return_internal_states : bool, optional
188            If True, includes the time history of the feedback (pole)
189            coefficients in ``result.extra["a_coefficients"]`` with shape
190            ``(N, poles_order)`` (or None if ``poles_order == 0``).
191
192        Returns
193        -------
194        OptimizationResult
195            Result object with fields:
196            - outputs : ndarray of float, shape ``(N,)``
197                "True IIR" output sequence ``y[k]`` computed with past outputs.
198            - errors : ndarray of float, shape ``(N,)``
199                Output error sequence ``e[k] = d[k] - y[k]``.
200            - coefficients : ndarray of float
201                Coefficient history recorded by the base class.
202            - error_type : str
203                Set to ``"equation_error"``.
204            - extra : dict
205                Always includes:
206                - ``"auxiliary_errors"``: ndarray of float, shape ``(N,)`` with
207                  the equation error ``e_eq[k] = d[k] - y_eq[k]`` used to drive
208                  the RLS update.
209                Additionally includes ``"a_coefficients"`` if
210                ``return_internal_states=True``.
211        """
212        tic: float = time()
213
214        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
215        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
216        n_samples: int = int(x.size)
217
218        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
219        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
220        errors_aux: np.ndarray = np.zeros(n_samples, dtype=np.float64)
221
222        a_track: Optional[np.ndarray] = (
223            np.zeros((n_samples, self.poles_order), dtype=np.float64)
224            if (return_internal_states and self.poles_order > 0)
225            else None
226        )
227
228        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
229        x_padded[self.zeros_order:] = x
230
231        
232
233        for k in range(n_samples):
234            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
235            
236            reg_y: np.ndarray = np.concatenate((self.y_buffer, reg_x))
237            
238            reg_e: np.ndarray = np.concatenate((self.d_buffer, reg_x))
239
240            y_out: float = float(np.dot(self.w, reg_y))
241            y_equation: float = float(np.dot(self.w, reg_e))
242
243            outputs[k] = y_out
244            errors[k] = float(d[k] - y_out)
245            errors_aux[k] = float(d[k] - y_equation)
246
247            psi: np.ndarray = self.Sd @ reg_e
248            den: float = float(self.forgetting_factor + reg_e.T @ psi)
249
250            self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)
251            self.w += (self.Sd @ reg_e) * errors_aux[k]
252
253            if self.poles_order > 0:
254                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
255
256                if return_internal_states and a_track is not None:
257                    a_track[k, :] = self.w[: self.poles_order]
258
259                self.y_buffer = np.concatenate(([y_out], self.y_buffer[:-1]))
260                self.d_buffer = np.concatenate(([d[k]], self.d_buffer[:-1]))
261
262            self._record_history()
263
264        runtime_s: float = float(time() - tic)
265        if verbose:
266            print(f"[ErrorEquation] Completed in {runtime_s * 1000:.02f} ms")
267
268        extra: Dict[str, Any] = {"auxiliary_errors": errors_aux}
269        if return_internal_states:
270            extra["a_coefficients"] = a_track
271
272        return self._pack_results(
273            outputs=outputs,
274            errors=errors,
275            runtime_s=runtime_s,
276            error_type="equation_error",
277            extra=extra,
278        )

Executes the equation-error RLS adaptation loop.

Parameters

input_signal : array_like of float
    Real-valued input sequence x[k] with shape (N,).
desired_signal : array_like of float
    Real-valued desired/reference sequence d[k] with shape (N,). Must have the same length as input_signal.
verbose : bool, optional
    If True, prints the total runtime after completion.
return_internal_states : bool, optional
    If True, includes the time history of the feedback (pole) coefficients in result.extra["a_coefficients"] with shape (N, poles_order) (or None if poles_order == 0).

Returns

OptimizationResult
    Result object with fields:
    - outputs : ndarray of float, shape (N,) — "True IIR" output sequence y[k] computed with past outputs.
    - errors : ndarray of float, shape (N,) — output error sequence e[k] = d[k] - y[k].
    - coefficients : ndarray of float — coefficient history recorded by the base class.
    - error_type : str — set to "equation_error".
    - extra : dict — always includes "auxiliary_errors", an ndarray of float with shape (N,) holding the equation error e_eq[k] = d[k] - y_eq[k] used to drive the RLS update; additionally includes "a_coefficients" if return_internal_states=True.

class GaussNewton(pydaptivefiltering.AdaptiveFilter):
 26class GaussNewton(AdaptiveFilter):
 27    """
 28    Gauss-Newton (recursive) output-error adaptation for IIR filters (real-valued).
 29
 30    This method targets the output-error (OE) criterion for IIR adaptive filtering,
 31    i.e., it adapts coefficients to reduce the squared error
 32    :math:`e(k) = d(k) - y(k)` where :math:`y(k)` is produced by the *recursive*
 33    (IIR) structure.
 34
 35    The Gauss-Newton idea is to approximate the Hessian of the OE cost by an
 36    outer-product model based on a sensitivity (Jacobian-like) vector
 37    :math:`\\phi(k)`. In this implementation, the associated inverse matrix
 38    (named ``Sd``) is updated recursively in an RLS-like fashion with an
 39    exponential smoothing factor ``alpha``. This yields faster convergence than
 40    plain gradient descent at the cost of roughly :math:`O((M+N)^2)` operations
 41    per sample.
 42
 43    This is a modified version of Diniz (3rd ed., Alg. 10.1). The implementation
 44    is restricted to **real-valued** signals (enforced by ``ensure_real_signals``).
 45
 46    Parameters
 47    ----------
 48    zeros_order : int
 49        Numerator order ``N`` (number of zeros). The feedforward part has
 50        ``N + 1`` coefficients.
 51    poles_order : int
 52        Denominator order ``M`` (number of poles). The feedback part has ``M``
 53        coefficients.
 54    alpha : float, optional
 55        Smoothing factor used in the recursive update of the inverse Hessian-like
 56        matrix ``Sd``. Must satisfy ``0 < alpha < 1``. Smaller values yield
 57        slower adaptation of ``Sd`` (more memory). Default is 0.05.
 58    step_size : float, optional
 59        Step size applied to the Gauss-Newton direction. Default is 1.0.
 60    delta : float, optional
 61        Positive regularization parameter for initializing ``Sd`` as
 62        :math:`S(0) = \\delta^{-1} I`. Default is 1e-3.
 63    w_init : array_like of float, optional
 64        Optional initial coefficient vector. If provided, it should have shape
 65        ``(M + N + 1,)`` following the parameter order described below. If None,
 66        the implementation initializes with zeros (and ignores ``w_init``).
 67
 68    Notes
 69    -----
 70    Parameterization (as implemented)
 71    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 72    The coefficient vector is arranged as:
 73
 74    - ``w[:M]``: feedback (pole) coefficients (often denoted ``a``)
 75    - ``w[M:]``: feedforward (zero) coefficients (often denoted ``b``)
 76
 77    Regressor and OE error (as implemented)
 78    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 79    With ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T`` and an internal buffer of the
 80    last ``M`` outputs, the code forms:
 81
 82    .. math::
 83        \\varphi(k) = [y(k-1), \\ldots, y(k-M),\\; x(k), \\ldots, x(k-N)]^T,
 84
 85    computes:
 86
 87    .. math::
 88        y(k) = w^T(k)\\, \\varphi(k), \\qquad e(k) = d(k) - y(k),
 89
 90    and uses ``e(k)`` as the output-error signal reported in ``errors``.
 91
 92    Sensitivity vector and Gauss-Newton recursion
 93    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 94    The update direction is built from filtered sensitivity signals stored in
 95    internal buffers (``x_line_buffer`` and ``y_line_buffer``). The code forms:
 96
 97    .. math::
 98        \\phi(k) =
 99        [\\underline{y}(k-1), \\ldots, \\underline{y}(k-M),\\;
100         -\\underline{x}(k), \\ldots, -\\underline{x}(k-N)]^T.
101
102    Given ``psi = Sd * phi`` and the scalar denominator
103
104    .. math::
105        \\text{den}(k) = \\frac{1-\\alpha}{\\alpha} + \\phi^T(k)\\, Sd(k-1)\\, \\phi(k),
106
107    the inverse Hessian-like matrix is updated as:
108
109    .. math::
110        Sd(k) = \\frac{1}{1-\\alpha}\\left(Sd(k-1) - \\frac{\\psi(k)\\psi^T(k)}{\\text{den}(k)}\\right),
111
112    and the coefficient update is:
113
114    .. math::
115        w(k+1) = w(k) - \\mu\\, Sd(k)\\, \\phi(k)\\, e(k).
116
117    Stability procedure
118    ~~~~~~~~~~~~~~~~~~~
119    After each update, the feedback coefficients ``w[:M]`` are stabilized by
120    reflecting poles outside the unit circle back inside (pole reflection).
121
122    References
123    ----------
124    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
125       Implementation*, 3rd ed., Algorithm 10.1 (modified).
126    """
127
128    supports_complex: bool = False
129    zeros_order: int
130    poles_order: int
131    alpha: float
132    step_size: float
133    delta: float
134    n_coeffs: int
135    Sd: np.ndarray
136    y_buffer: np.ndarray
137    x_line_buffer: np.ndarray
138    y_line_buffer: np.ndarray
139
140    def __init__(
141        self,
142        zeros_order: int,
143        poles_order: int,
144        alpha: float = 0.05,
145        step_size: float = 1.0,
146        delta: float = 1e-3,
147        w_init: Optional[Union[np.ndarray, list]] = None,
148    ) -> None:
149        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
150
151        self.zeros_order = int(zeros_order)
152        self.poles_order = int(poles_order)
153        self.alpha = float(alpha)
154        self.step_size = float(step_size)
155        self.delta = float(delta)
156
157        self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)
158        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
159
160        self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64)
161
162        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
163
164        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
165        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
166        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
167
168    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
169        """
170        Reflects poles outside the unit circle back inside to maintain stability.
171        """
172        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
173        poles: np.ndarray = np.roots(poly_coeffs)
174        mask: np.ndarray = np.abs(poles) > 1.0
175        if np.any(mask):
176            poles[mask] = 1.0 / np.conj(poles[mask])
177            new_poly: np.ndarray = np.poly(poles)
178            return -np.real(new_poly[1:])
179        return a_coeffs
180
181    @ensure_real_signals
182    @validate_input
183    def optimize(
184        self,
185        input_signal: np.ndarray,
186        desired_signal: np.ndarray,
187        verbose: bool = False,
188        return_internal_states: bool = False,
189    ) -> OptimizationResult:
190        """
191        Executes the (recursive) Gauss-Newton OE adaptation loop.
192
193        Parameters
194        ----------
195        input_signal : array_like of float
196            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
197        desired_signal : array_like of float
198            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
199            Must have the same length as ``input_signal``.
200        verbose : bool, optional
201            If True, prints the total runtime after completion.
202        return_internal_states : bool, optional
203            If True, includes sensitivity trajectories in ``result.extra``:
204            - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
205              scalar sensitivity signal :math:`\\underline{x}(k)`.
206            - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
207              scalar sensitivity signal :math:`\\underline{y}(k)`.
208
209        Returns
210        -------
211        OptimizationResult
212            Result object with fields:
213            - outputs : ndarray of float, shape ``(N,)``
214                Output sequence ``y[k]`` produced by the current IIR structure.
215            - errors : ndarray of float, shape ``(N,)``
216                Output error sequence ``e[k] = d[k] - y[k]``.
217            - coefficients : ndarray of float
218                Coefficient history recorded by the base class.
219            - error_type : str
220                Set to ``"output_error"``.
221            - extra : dict
222                Empty unless ``return_internal_states=True``.
223        """
224        tic: float = time()
225
226        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
227        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
228        n_samples: int = int(x.size)
229
230        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
231        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
232
233        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
234        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
235
236        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
237        x_padded[self.zeros_order:] = x
238
239        inv_alpha: float = float(1.0 - self.alpha)
240        alpha_ratio: float = float(inv_alpha / self.alpha)
241
242        
243
244        for k in range(n_samples):
245            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
246            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))
247
248            y_k: float = float(np.dot(self.w, regressor))
249            outputs[k] = y_k
250            e_k: float = float(d[k] - y_k)
251            errors[k] = e_k
252
253            a_coeffs: np.ndarray = self.w[: self.poles_order]
254            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))
255
256            y_line_k: float = 0.0
257            if self.poles_order > 0:
258                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
259                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))
260
261            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
262            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))
263
264            if return_internal_states and x_line_track is not None:
265                x_line_track[k], y_line_track[k] = x_line_k, y_line_k
266
267            phi: np.ndarray = np.concatenate(
268                (
269                    self.y_line_buffer[: self.poles_order],
270                    -self.x_line_buffer[: self.zeros_order + 1],
271                )
272            )
273
274            psi: np.ndarray = self.Sd @ phi
275            den: float = float(alpha_ratio + phi.T @ psi)
276
277            self.Sd = (1.0 / inv_alpha) * (self.Sd - np.outer(psi, psi) / den)
278            self.w -= self.step_size * (self.Sd @ phi) * e_k
279
280            if self.poles_order > 0:
281                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
282                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))
283
284            self._record_history()
285
286        runtime_s: float = float(time() - tic)
287        if verbose:
288            print(f"[GaussNewton] Completed in {runtime_s * 1000:.02f} ms")
289
290        extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}
291
292        return self._pack_results(
293            outputs=outputs,
294            errors=errors,
295            runtime_s=runtime_s,
296            error_type="output_error",
297            extra=extra,
298        )

Gauss-Newton (recursive) output-error adaptation for IIR filters (real-valued).

This method targets the output-error (OE) criterion for IIR adaptive filtering, i.e., it adapts coefficients to reduce the squared error \( e(k) = d(k) - y(k) \) where \( y(k) \) is produced by the recursive (IIR) structure.

The Gauss-Newton idea is to approximate the Hessian of the OE cost by an outer-product model based on a sensitivity (Jacobian-like) vector \( \phi(k) \). In this implementation, the associated inverse matrix (named Sd) is updated recursively in an RLS-like fashion with an exponential smoothing factor alpha. This yields faster convergence than plain gradient descent at the cost of roughly \( O((M+N)^2) \) operations per sample.

This is a modified version of Diniz (3rd ed., Alg. 10.1). The implementation is restricted to real-valued signals (enforced by ensure_real_signals).

Parameters

zeros_order : int
    Numerator order N (number of zeros). The feedforward part has N + 1 coefficients.
poles_order : int
    Denominator order M (number of poles). The feedback part has M coefficients.
alpha : float, optional
    Smoothing factor used in the recursive update of the inverse Hessian-like matrix Sd. Must satisfy 0 < alpha < 1. Smaller values yield slower adaptation of Sd (more memory). Default is 0.05.
step_size : float, optional
    Step size applied to the Gauss-Newton direction. Default is 1.0.
delta : float, optional
    Positive regularization parameter for initializing Sd as \( S(0) = \delta^{-1} I \). Default is 1e-3.
w_init : array_like of float, optional
    Optional initial coefficient vector. If provided, it should have shape (M + N + 1,) following the parameter order described below. If None, the implementation initializes with zeros (and ignores w_init).

Notes

Parameterization (as implemented): the coefficient vector is arranged as:

  • w[:M]: feedback (pole) coefficients (often denoted a)
  • w[M:]: feedforward (zero) coefficients (often denoted b)

Regressor and OE error (as implemented): with reg_x = [x(k), x(k-1), ..., x(k-N)]^T and an internal buffer of the last M outputs, the code forms:

$$\varphi(k) = [y(k-1), \ldots, y(k-M),\; x(k), \ldots, x(k-N)]^T,$$

computes:

$$y(k) = w^T(k)\, \varphi(k), \qquad e(k) = d(k) - y(k),$$

and uses e(k) as the output-error signal reported in errors.

Sensitivity vector and Gauss-Newton recursion: the update direction is built from filtered sensitivity signals stored in internal buffers (x_line_buffer and y_line_buffer). The code forms:

$$\phi(k) = [\underline{y}(k-1), \ldots, \underline{y}(k-M),\; -\underline{x}(k), \ldots, -\underline{x}(k-N)]^T.$$

Given psi = Sd * phi and the scalar denominator

$$\text{den}(k) = \frac{1-\alpha}{\alpha} + \phi^T(k)\, Sd(k-1)\, \phi(k),$$

the inverse Hessian-like matrix is updated as:

$$Sd(k) = \frac{1}{1-\alpha}\left(Sd(k-1) - \frac{\psi(k)\psi^T(k)}{\text{den}(k)}\right),$$

and the coefficient update is:

$$w(k+1) = w(k) - \mu\, Sd(k)\, \phi(k)\, e(k).$$

Stability procedure: after each update, the feedback coefficients w[:M] are stabilized by reflecting poles outside the unit circle back inside (pole reflection).

References

[1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 3rd ed., Algorithm 10.1 (modified).

GaussNewton( zeros_order: int, poles_order: int, alpha: float = 0.05, step_size: float = 1.0, delta: float = 0.001, w_init: Union[numpy.ndarray, list, NoneType] = None)
140    def __init__(
141        self,
142        zeros_order: int,
143        poles_order: int,
144        alpha: float = 0.05,
145        step_size: float = 1.0,
146        delta: float = 1e-3,
147        w_init: Optional[Union[np.ndarray, list]] = None,
148    ) -> None:
149        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
150
151        self.zeros_order = int(zeros_order)
152        self.poles_order = int(poles_order)
153        self.alpha = float(alpha)
154        self.step_size = float(step_size)
155        self.delta = float(delta)
156
157        self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)
158        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
159
160        self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64)
161
162        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
163
164        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
165        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
166        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
supports_complex: bool = False
zeros_order: int
poles_order: int
alpha: float
step_size: float
delta: float
n_coeffs: int
Sd: numpy.ndarray
y_buffer: numpy.ndarray
x_line_buffer: numpy.ndarray
y_line_buffer: numpy.ndarray
w
@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
181    @ensure_real_signals
182    @validate_input
183    def optimize(
184        self,
185        input_signal: np.ndarray,
186        desired_signal: np.ndarray,
187        verbose: bool = False,
188        return_internal_states: bool = False,
189    ) -> OptimizationResult:
190        """
191        Executes the (recursive) Gauss-Newton OE adaptation loop.
192
193        Parameters
194        ----------
195        input_signal : array_like of float
196            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
197        desired_signal : array_like of float
198            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
199            Must have the same length as ``input_signal``.
200        verbose : bool, optional
201            If True, prints the total runtime after completion.
202        return_internal_states : bool, optional
203            If True, includes sensitivity trajectories in ``result.extra``:
204            - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
205              scalar sensitivity signal :math:`\\underline{x}(k)`.
206            - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
207              scalar sensitivity signal :math:`\\underline{y}(k)`.
208
209        Returns
210        -------
211        OptimizationResult
212            Result object with fields:
213            - outputs : ndarray of float, shape ``(N,)``
214                Output sequence ``y[k]`` produced by the current IIR structure.
215            - errors : ndarray of float, shape ``(N,)``
216                Output error sequence ``e[k] = d[k] - y[k]``.
217            - coefficients : ndarray of float
218                Coefficient history recorded by the base class.
219            - error_type : str
220                Set to ``"output_error"``.
221            - extra : dict
222                Empty unless ``return_internal_states=True``.
223        """
224        tic: float = time()
225
226        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
227        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
228        n_samples: int = int(x.size)
229
230        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
231        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
232
233        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
234        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
235
236        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
237        x_padded[self.zeros_order:] = x
238
239        inv_alpha: float = float(1.0 - self.alpha)
240        alpha_ratio: float = float(inv_alpha / self.alpha)
241
242        
243
244        for k in range(n_samples):
245            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
246            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))
247
248            y_k: float = float(np.dot(self.w, regressor))
249            outputs[k] = y_k
250            e_k: float = float(d[k] - y_k)
251            errors[k] = e_k
252
253            a_coeffs: np.ndarray = self.w[: self.poles_order]
254            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))
255
256            y_line_k: float = 0.0
257            if self.poles_order > 0:
258                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
259                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))
260
261            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
262            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))
263
264            if return_internal_states and x_line_track is not None:
265                x_line_track[k], y_line_track[k] = x_line_k, y_line_k
266
267            phi: np.ndarray = np.concatenate(
268                (
269                    self.y_line_buffer[: self.poles_order],
270                    -self.x_line_buffer[: self.zeros_order + 1],
271                )
272            )
273
274            psi: np.ndarray = self.Sd @ phi
275            den: float = float(alpha_ratio + phi.T @ psi)
276
277            self.Sd = (1.0 / inv_alpha) * (self.Sd - np.outer(psi, psi) / den)
278            self.w -= self.step_size * (self.Sd @ phi) * e_k
279
280            if self.poles_order > 0:
281                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
282                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))
283
284            self._record_history()
285
286        runtime_s: float = float(time() - tic)
287        if verbose:
288            print(f"[GaussNewton] Completed in {runtime_s * 1000:.02f} ms")
289
290        extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}
291
292        return self._pack_results(
293            outputs=outputs,
294            errors=errors,
295            runtime_s=runtime_s,
296            error_type="output_error",
297            extra=extra,
298        )

Executes the (recursive) Gauss-Newton OE adaptation loop.

Parameters

- input_signal : array_like of float — real-valued input sequence x[k] with shape (N,).
- desired_signal : array_like of float — real-valued desired/reference sequence d[k] with shape (N,); must have the same length as input_signal.
- verbose : bool, optional — if True, prints the total runtime after completion.
- return_internal_states : bool, optional — if True, includes sensitivity trajectories in result.extra: "x_sensitivity" (ndarray of float, shape (N,)) holding the scalar sensitivity signal \( \underline{x}(k) \), and "y_sensitivity" (ndarray of float, shape (N,)) holding the scalar sensitivity signal \( \underline{y}(k) \).

Returns

OptimizationResult — result object with fields:
- outputs : ndarray of float, shape (N,) — output sequence y[k] produced by the current IIR structure.
- errors : ndarray of float, shape (N,) — output error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of float — coefficient history recorded by the base class.
- error_type : str — set to "output_error".
- extra : dict — empty unless return_internal_states=True.

class GaussNewtonGradient(pydaptivefiltering.AdaptiveFilter):
 26class GaussNewtonGradient(AdaptiveFilter):
 27    """
 28    Gradient-based Gauss-Newton (output-error) adaptation for IIR filters (real-valued).
 29
 30    This method targets the output-error (OE) criterion for IIR adaptive filtering,
 31    i.e., it adapts the coefficients to minimize the squared error
 32    :math:`e(k) = d(k) - y(k)` where :math:`y(k)` is produced by the *recursive*
 33    (IIR) structure.
 34
 35    Compared to the classical Gauss-Newton approach, this implementation uses a
 36    simplified *gradient* update (no matrix inversions) while still leveraging
 37    filtered sensitivity signals to approximate how the output changes with
 38    respect to pole/zero coefficients.
 39
 40    This is a modified version of Diniz (3rd ed., Alg. 10.1). The implementation
 41    is restricted to **real-valued** signals (enforced by ``ensure_real_signals``).
 42
 43    Parameters
 44    ----------
 45    zeros_order : int
 46        Numerator order ``N`` (number of zeros). The feedforward part has
 47        ``N + 1`` coefficients.
 48    poles_order : int
 49        Denominator order ``M`` (number of poles). The feedback part has ``M``
 50        coefficients.
 51    step_size : float, optional
 52        Adaptation step size ``mu``. Default is 1e-3.
 53    w_init : array_like of float, optional
 54        Optional initial coefficient vector. If provided, it should have shape
 55        ``(M + N + 1,)`` following the parameter order described below. If None,
 56        the implementation initializes with zeros (and ignores ``w_init``).
 57
 58    Notes
 59    -----
 60    Parameterization (as implemented)
 61    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 62    The coefficient vector is arranged as:
 63
 64    - ``w[:M]``: feedback (pole) coefficients (often denoted ``a``)
 65    - ``w[M:]``: feedforward (zero) coefficients (often denoted ``b``)
 66
 67    Regressor and output (as implemented)
 68    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 69    With ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T`` and an internal buffer of the
 70    last ``M`` outputs, this implementation forms:
 71
 72    .. math::
 73        \\varphi(k) = [y(k-1), \\ldots, y(k-M),\\; x(k), \\ldots, x(k-N)]^T,
 74
 75    and computes the (recursive) output used by the OE criterion as:
 76
 77    .. math::
 78        y(k) = w^T(k)\\, \\varphi(k), \\qquad e(k) = d(k) - y(k).
 79
 80    Sensitivity-based gradient factor
 81    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 82    The update direction is built from filtered sensitivity signals stored in
 83    internal buffers (``x_line_buffer`` and ``y_line_buffer``). The code forms:
 84
 85    .. math::
 86        \\phi(k) =
 87        [\\underline{y}(k-1), \\ldots, \\underline{y}(k-M),\\;
 88         -\\underline{x}(k), \\ldots, -\\underline{x}(k-N)]^T,
 89
 90    and applies the per-sample gradient step:
 91
 92    .. math::
 93        w(k+1) = w(k) - \\mu\\, \\phi(k)\\, e(k).
 94
 95    Stability procedure
 96    ~~~~~~~~~~~~~~~~~~~
 97    After each update, the feedback coefficients ``w[:M]`` are stabilized by
 98    reflecting poles outside the unit circle back inside (pole reflection).
 99
100    References
101    ----------
102    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
103       Implementation*, 3rd ed., Algorithm 10.1 (modified).
104    """
105
106    supports_complex: bool = False
107    zeros_order: int
108    poles_order: int
109    step_size: float
110    n_coeffs: int
111    y_buffer: np.ndarray
112    x_line_buffer: np.ndarray
113    y_line_buffer: np.ndarray
114
115    def __init__(
116        self,
117        zeros_order: int,
118        poles_order: int,
119        step_size: float = 1e-3,
120        w_init: Optional[Union[np.ndarray, list]] = None,
121    ) -> None:
122        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
123
124        self.zeros_order = int(zeros_order)
125        self.poles_order = int(poles_order)
126        self.step_size = float(step_size)
127
128        self.n_coeffs = int(self.zeros_order + self.poles_order + 1)
129        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
130
131        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
132
133        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
134        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
135        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
136
137    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
138        """
139        Enforces IIR stability by reflecting poles outside the unit circle back inside.
140        Essential for preventing divergence during the gradient descent update.
141        """
142        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
143        poles: np.ndarray = np.roots(poly_coeffs)
144        mask: np.ndarray = np.abs(poles) > 1.0
145        
146        if np.any(mask):
147            poles[mask] = 1.0 / np.conj(poles[mask])
148            new_poly: np.ndarray = np.poly(poles)
149            return -np.real(new_poly[1:])
150        return a_coeffs
151
152    @ensure_real_signals
153    @validate_input
154    def optimize(
155        self,
156        input_signal: np.ndarray,
157        desired_signal: np.ndarray,
158        verbose: bool = False,
159        return_internal_states: bool = False,
160    ) -> OptimizationResult:
161        """
162        Executes the gradient-based Gauss-Newton (OE) adaptation loop.
163
164        Parameters
165        ----------
166        input_signal : array_like of float
167            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
168        desired_signal : array_like of float
169            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
170            Must have the same length as ``input_signal``.
171        verbose : bool, optional
172            If True, prints the total runtime after completion.
173        return_internal_states : bool, optional
174            If True, includes sensitivity trajectories in ``result.extra``:
175            - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
176              scalar sensitivity signal :math:`\\underline{x}(k)` produced by
177              the recursion in the code.
178            - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
179              scalar sensitivity signal :math:`\\underline{y}(k)` produced by
180              the recursion in the code.
181
182        Returns
183        -------
184        OptimizationResult
185            Result object with fields:
186            - outputs : ndarray of float, shape ``(N,)``
187                Output sequence ``y[k]`` produced by the current IIR structure.
188            - errors : ndarray of float, shape ``(N,)``
189                Output error sequence ``e[k] = d[k] - y[k]``.
190            - coefficients : ndarray of float
191                Coefficient history recorded by the base class.
192            - error_type : str
193                Set to ``"output_error"``.
194            - extra : dict
195                Empty unless ``return_internal_states=True``.
196        """
197        tic: float = time()
198
199        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
200        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
201        n_samples: int = int(x.size)
202
203        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
204        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
205
206        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
207        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
208
209        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
210        x_padded[self.zeros_order:] = x
211
212        for k in range(n_samples):
213            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
214            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))
215
216            y_k: float = float(np.dot(self.w, regressor))
217            outputs[k] = y_k
218            e_k: float = float(d[k] - y_k)
219            errors[k] = e_k
220
221            a_coeffs: np.ndarray = self.w[: self.poles_order]
222            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))
223
224            y_line_k: float = 0.0
225            if self.poles_order > 0:
226                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
227                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))
228
229            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
230            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))
231
232            if return_internal_states and x_line_track is not None:
233                x_line_track[k], y_line_track[k] = x_line_k, y_line_k
234
235            phi: np.ndarray = np.concatenate(
236                (
237                    self.y_line_buffer[: self.poles_order],
238                    -self.x_line_buffer[: self.zeros_order + 1],
239                )
240            )
241
242            self.w -= self.step_size * phi * e_k
243
244            if self.poles_order > 0:
245                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
246                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))
247
248            self._record_history()
249
250        runtime_s: float = float(time() - tic)
251        if verbose:
252            print(f"[GaussNewtonGradient] Completed in {runtime_s * 1000:.02f} ms")
253
254        extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}
255
256        return self._pack_results(
257            outputs=outputs,
258            errors=errors,
259            runtime_s=runtime_s,
260            error_type="output_error",
261            extra=extra,
262        )

Gradient-based Gauss-Newton (output-error) adaptation for IIR filters (real-valued).

This method targets the output-error (OE) criterion for IIR adaptive filtering, i.e., it adapts the coefficients to minimize the squared error \( e(k) = d(k) - y(k) \) where \( y(k) \) is produced by the recursive (IIR) structure.

Compared to the classical Gauss-Newton approach, this implementation uses a simplified gradient update (no matrix inversions) while still leveraging filtered sensitivity signals to approximate how the output changes with respect to pole/zero coefficients.

This is a modified version of Diniz (3rd ed., Alg. 10.1). The implementation is restricted to real-valued signals (enforced by ensure_real_signals).

Parameters

- zeros_order : int — numerator order N (number of zeros); the feedforward part has N + 1 coefficients.
- poles_order : int — denominator order M (number of poles); the feedback part has M coefficients.
- step_size : float, optional — adaptation step size mu; default is 1e-3.
- w_init : array_like of float, optional — initial coefficient vector. If provided, it should have shape (M + N + 1,) following the parameter order described below. If None, the implementation initializes with zeros (and ignores w_init).

Notes

Parameterization (as implemented): the coefficient vector is arranged as:

  • w[:M]: feedback (pole) coefficients (often denoted a)
  • w[M:]: feedforward (zero) coefficients (often denoted b)

Regressor and output (as implemented): with reg_x = [x(k), x(k-1), ..., x(k-N)]^T and an internal buffer of the last M outputs, this implementation forms:

$$\varphi(k) = [y(k-1), \ldots, y(k-M),\; x(k), \ldots, x(k-N)]^T,$$

and computes the (recursive) output used by the OE criterion as:

$$y(k) = w^T(k)\, \varphi(k), \qquad e(k) = d(k) - y(k).$$

Sensitivity-based gradient factor: the update direction is built from filtered sensitivity signals stored in internal buffers (x_line_buffer and y_line_buffer). The code forms:

$$\phi(k) = [\underline{y}(k-1), \ldots, \underline{y}(k-M),\; -\underline{x}(k), \ldots, -\underline{x}(k-N)]^T,$$

and applies the per-sample gradient step:

$$w(k+1) = w(k) - \mu\, \phi(k)\, e(k).$$

Stability procedure: after each update, the feedback coefficients w[:M] are stabilized by reflecting any poles lying outside the unit circle back inside it (pole reflection), preventing divergence of the recursive filter.

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 3rd ed., Algorithm 10.1 (modified).

GaussNewtonGradient( zeros_order: int, poles_order: int, step_size: float = 0.001, w_init: Union[numpy.ndarray, list, NoneType] = None)
115    def __init__(
116        self,
117        zeros_order: int,
118        poles_order: int,
119        step_size: float = 1e-3,
120        w_init: Optional[Union[np.ndarray, list]] = None,
121    ) -> None:
122        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
123
124        self.zeros_order = int(zeros_order)
125        self.poles_order = int(poles_order)
126        self.step_size = float(step_size)
127
128        self.n_coeffs = int(self.zeros_order + self.poles_order + 1)
129        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
130
131        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
132
133        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
134        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
135        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
supports_complex: bool = False
zeros_order: int
poles_order: int
step_size: float
n_coeffs: int
y_buffer: numpy.ndarray
x_line_buffer: numpy.ndarray
y_line_buffer: numpy.ndarray
w
@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
152    @ensure_real_signals
153    @validate_input
154    def optimize(
155        self,
156        input_signal: np.ndarray,
157        desired_signal: np.ndarray,
158        verbose: bool = False,
159        return_internal_states: bool = False,
160    ) -> OptimizationResult:
161        """
162        Executes the gradient-based Gauss-Newton (OE) adaptation loop.
163
164        Parameters
165        ----------
166        input_signal : array_like of float
167            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
168        desired_signal : array_like of float
169            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
170            Must have the same length as ``input_signal``.
171        verbose : bool, optional
172            If True, prints the total runtime after completion.
173        return_internal_states : bool, optional
174            If True, includes sensitivity trajectories in ``result.extra``:
175            - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
176              scalar sensitivity signal :math:`\\underline{x}(k)` produced by
177              the recursion in the code.
178            - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
179              scalar sensitivity signal :math:`\\underline{y}(k)` produced by
180              the recursion in the code.
181
182        Returns
183        -------
184        OptimizationResult
185            Result object with fields:
186            - outputs : ndarray of float, shape ``(N,)``
187                Output sequence ``y[k]`` produced by the current IIR structure.
188            - errors : ndarray of float, shape ``(N,)``
189                Output error sequence ``e[k] = d[k] - y[k]``.
190            - coefficients : ndarray of float
191                Coefficient history recorded by the base class.
192            - error_type : str
193                Set to ``"output_error"``.
194            - extra : dict
195                Empty unless ``return_internal_states=True``.
196        """
197        tic: float = time()
198
199        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
200        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
201        n_samples: int = int(x.size)
202
203        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
204        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
205
206        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
207        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
208
209        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
210        x_padded[self.zeros_order:] = x
211
212        for k in range(n_samples):
213            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
214            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))
215
216            y_k: float = float(np.dot(self.w, regressor))
217            outputs[k] = y_k
218            e_k: float = float(d[k] - y_k)
219            errors[k] = e_k
220
221            a_coeffs: np.ndarray = self.w[: self.poles_order]
222            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))
223
224            y_line_k: float = 0.0
225            if self.poles_order > 0:
226                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
227                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))
228
229            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
230            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))
231
232            if return_internal_states and x_line_track is not None:
233                x_line_track[k], y_line_track[k] = x_line_k, y_line_k
234
235            phi: np.ndarray = np.concatenate(
236                (
237                    self.y_line_buffer[: self.poles_order],
238                    -self.x_line_buffer[: self.zeros_order + 1],
239                )
240            )
241
242            self.w -= self.step_size * phi * e_k
243
244            if self.poles_order > 0:
245                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
246                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))
247
248            self._record_history()
249
250        runtime_s: float = float(time() - tic)
251        if verbose:
252            print(f"[GaussNewtonGradient] Completed in {runtime_s * 1000:.02f} ms")
253
254        extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}
255
256        return self._pack_results(
257            outputs=outputs,
258            errors=errors,
259            runtime_s=runtime_s,
260            error_type="output_error",
261            extra=extra,
262        )

Executes the gradient-based Gauss-Newton (OE) adaptation loop.

Parameters

- input_signal : array_like of float — real-valued input sequence x[k] with shape (N,).
- desired_signal : array_like of float — real-valued desired/reference sequence d[k] with shape (N,); must have the same length as input_signal.
- verbose : bool, optional — if True, prints the total runtime after completion.
- return_internal_states : bool, optional — if True, includes sensitivity trajectories in result.extra: "x_sensitivity" (ndarray of float, shape (N,)) holding the scalar sensitivity signal \( \underline{x}(k) \) produced by the recursion in the code, and "y_sensitivity" (ndarray of float, shape (N,)) holding the scalar sensitivity signal \( \underline{y}(k) \) produced by the recursion in the code.

Returns

OptimizationResult — result object with fields:
- outputs : ndarray of float, shape (N,) — output sequence y[k] produced by the current IIR structure.
- errors : ndarray of float, shape (N,) — output error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of float — coefficient history recorded by the base class.
- error_type : str — set to "output_error".
- extra : dict — empty unless return_internal_states=True.

class RLSIIR(pydaptivefiltering.AdaptiveFilter):
 27class RLSIIR(AdaptiveFilter):
 28    """
 29    RLS-like output-error adaptation for IIR filters (real-valued).
 30
 31    This algorithm applies an RLS-style recursion to the IIR output-error (OE)
 32    problem. Rather than minimizing a linear FIR error, it uses filtered
 33    sensitivity signals to build a Jacobian-like vector :math:`\\phi(k)` that
 34    approximates how the IIR output changes with respect to the pole/zero
 35    parameters. The inverse correlation matrix (named ``Sd``) scales the update,
 36    typically yielding faster convergence than plain gradient methods.
 37
 38    The implementation corresponds to a modified form of Diniz (3rd ed.,
 39    Alg. 10.1) and is restricted to **real-valued** signals (enforced by
 40    ``ensure_real_signals``).
 41
 42    Parameters
 43    ----------
 44    zeros_order : int
 45        Numerator order ``N`` (number of zeros). The feedforward part has
 46        ``N + 1`` coefficients.
 47    poles_order : int
 48        Denominator order ``M`` (number of poles). The feedback part has ``M``
 49        coefficients.
 50    forgetting_factor : float, optional
 51        Exponential forgetting factor ``lambda`` used in the recursive update of
 52        ``Sd``. Typical values are in ``[0.9, 1.0]``. Default is 0.99.
 53    delta : float, optional
 54        Positive regularization parameter for initializing ``Sd`` as
 55        :math:`S(0) = \\delta^{-1} I`. Default is 1e-3.
 56    w_init : array_like of float, optional
 57        Optional initial coefficient vector. If provided, it should have shape
 58        ``(M + N + 1,)`` following the parameter order described below. If None,
 59        the implementation initializes with zeros (and ignores ``w_init``).
 60
 61    Notes
 62    -----
 63    Parameterization (as implemented)
 64    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 65    The coefficient vector is arranged as:
 66
 67    - ``w[:M]``: feedback (pole) coefficients (often denoted ``a``)
 68    - ``w[M:]``: feedforward (zero) coefficients (often denoted ``b``)
 69
 70    OE output and error (as implemented)
 71    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 72    With ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T`` and an internal buffer of the
 73    last ``M`` outputs, the code forms:
 74
 75    .. math::
 76        \\varphi(k) = [y(k-1), \\ldots, y(k-M),\\; x(k), \\ldots, x(k-N)]^T,
 77
 78    computes:
 79
 80    .. math::
 81        y(k) = w^T(k)\\, \\varphi(k), \\qquad e(k) = d(k) - y(k),
 82
 83    and reports ``e(k)`` as the output-error sequence.
 84
 85    Sensitivity vector and RLS recursion
 86    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 87    Filtered sensitivity signals stored in internal buffers (``x_line_buffer``
 88    and ``y_line_buffer``) are used to build:
 89
 90    .. math::
 91        \\phi(k) =
 92        [\\underline{y}(k-1), \\ldots, \\underline{y}(k-M),\\;
 93         -\\underline{x}(k), \\ldots, -\\underline{x}(k-N)]^T.
 94
 95    The inverse correlation matrix ``Sd`` is updated in an RLS-like manner:
 96
 97    .. math::
 98        \\psi(k) = Sd(k-1)\\, \\phi(k), \\quad
 99        \\text{den}(k) = \\lambda + \\phi^T(k)\\, \\psi(k),
100
101    .. math::
102        Sd(k) = \\frac{1}{\\lambda}
103                \\left(Sd(k-1) - \\frac{\\psi(k)\\psi^T(k)}{\\text{den}(k)}\\right).
104
105    The coefficient update used here is:
106
107    .. math::
108        w(k+1) = w(k) - Sd(k)\\, \\phi(k)\\, e(k).
109
110    (Note: this implementation does not expose an additional step-size parameter;
111    the effective step is governed by ``Sd``.)
112
113    Stability procedure
114    ~~~~~~~~~~~~~~~~~~~
115    After each update, the feedback coefficients ``w[:M]`` are stabilized by
116    reflecting poles outside the unit circle back inside (pole reflection).
117
118    References
119    ----------
120    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
121       Implementation*, 3rd ed., Algorithm 10.1 (modified).
122    """
123
124    supports_complex: bool = False
125    zeros_order: int
126    poles_order: int
127    forgetting_factor: float
128    delta: float
129    n_coeffs: int
130    Sd: np.ndarray
131    y_buffer: np.ndarray
132    x_line_buffer: np.ndarray
133    y_line_buffer: np.ndarray
134
135    def __init__(
136        self,
137        zeros_order: int,
138        poles_order: int,
139        forgetting_factor: float = 0.99,
140        delta: float = 1e-3,
141        w_init: Optional[Union[np.ndarray, list]] = None,
142    ) -> None:
143        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
144
145        self.zeros_order = int(zeros_order)
146        self.poles_order = int(poles_order)
147        self.forgetting_factor = float(forgetting_factor)
148        self.delta = float(delta)
149
150        self.n_coeffs = int(self.zeros_order + self.poles_order + 1)
151        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
152
153        self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64)
154
155        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
156
157        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
158        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
159        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
160
161    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
162        """
163        Enforces IIR stability by reflecting poles outside the unit circle back inside.
164        """
165        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
166        poles: np.ndarray = np.roots(poly_coeffs)
167        mask: np.ndarray = np.abs(poles) > 1.0
168        if np.any(mask):
169            poles[mask] = 1.0 / np.conj(poles[mask])
170            new_poly: np.ndarray = np.poly(poles)
171            return -np.real(new_poly[1:])
172        return a_coeffs
173
174    @ensure_real_signals
175    @validate_input
176    def optimize(
177        self,
178        input_signal: np.ndarray,
179        desired_signal: np.ndarray,
180        verbose: bool = False,
181        return_internal_states: bool = False,
182    ) -> OptimizationResult:
183        """
184        Executes the RLS-IIR (OE) adaptation loop.
185
186        Parameters
187        ----------
188        input_signal : array_like of float
189            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
190        desired_signal : array_like of float
191            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
192            Must have the same length as ``input_signal``.
193        verbose : bool, optional
194            If True, prints the total runtime after completion.
195        return_internal_states : bool, optional
196            If True, includes sensitivity trajectories in ``result.extra``:
197            - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
198              scalar sensitivity signal :math:`\\underline{x}(k)`.
199            - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
200              scalar sensitivity signal :math:`\\underline{y}(k)`.
201
202        Returns
203        -------
204        OptimizationResult
205            Result object with fields:
206            - outputs : ndarray of float, shape ``(N,)``
207                Output sequence ``y[k]`` produced by the current IIR structure.
208            - errors : ndarray of float, shape ``(N,)``
209                Output error sequence ``e[k] = d[k] - y[k]``.
210            - coefficients : ndarray of float
211                Coefficient history recorded by the base class.
212            - error_type : str
213                Set to ``"output_error"``.
214            - extra : dict
215                Empty unless ``return_internal_states=True``.
216        """
217        tic: float = time()
218
219        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
220        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
221        n_samples: int = int(x.size)
222
223        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
224        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
225
226        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
227        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
228
229        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
230        x_padded[self.zeros_order:] = x
231
232        
233
234        for k in range(n_samples):
235            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
236            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))
237
238            y_k: float = float(np.dot(self.w, regressor))
239            outputs[k] = y_k
240            e_k: float = float(d[k] - y_k)
241            errors[k] = e_k
242
243            a_coeffs: np.ndarray = self.w[: self.poles_order]
244            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))
245
246            y_line_k: float = 0.0
247            if self.poles_order > 0:
248                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
249                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))
250
251            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
252            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))
253
254            if return_internal_states and x_line_track is not None:
255                x_line_track[k], y_line_track[k] = x_line_k, y_line_k
256
257            phi: np.ndarray = np.concatenate(
258                (
259                    self.y_line_buffer[: self.poles_order],
260                    -self.x_line_buffer[: self.zeros_order + 1],
261                )
262            )
263
264            psi: np.ndarray = self.Sd @ phi
265            den: float = float(self.forgetting_factor + phi.T @ psi)
266            
267            self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)
268
269            self.w -= (self.Sd @ phi) * e_k
270
271            if self.poles_order > 0:
272                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
273                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))
274
275            self._record_history()
276
277        runtime_s: float = float(time() - tic)
278        if verbose:
279            print(f"[RLSIIR] Completed in {runtime_s * 1000:.02f} ms")
280
281        extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}
282
283        return self._pack_results(
284            outputs=outputs,
285            errors=errors,
286            runtime_s=runtime_s,
287            error_type="output_error",
288            extra=extra,
289        )

RLS-like output-error adaptation for IIR filters (real-valued).

This algorithm applies an RLS-style recursion to the IIR output-error (OE) problem. Rather than minimizing a linear FIR error, it uses filtered sensitivity signals to build a Jacobian-like vector \( \phi(k) \) that approximates how the IIR output changes with respect to the pole/zero parameters. The inverse correlation matrix (named Sd) scales the update, typically yielding faster convergence than plain gradient methods.

The implementation corresponds to a modified form of Diniz (3rd ed., Alg. 10.1) and is restricted to real-valued signals (enforced by ensure_real_signals).

Parameters

zeros_order : int
    Numerator order N (number of zeros). The feedforward part has N + 1 coefficients.
poles_order : int
    Denominator order M (number of poles). The feedback part has M coefficients.
forgetting_factor : float, optional
    Exponential forgetting factor lambda used in the recursive update of Sd. Typical values are in [0.9, 1.0]. Default is 0.99.
delta : float, optional
    Positive regularization parameter for initializing Sd as \( S(0) = \delta^{-1} I \). Default is 1e-3.
w_init : array_like of float, optional
    Optional initial coefficient vector. If provided, it should have shape (M + N + 1,) following the parameter order described below. If None, the implementation initializes with zeros (and ignores w_init).

Notes

Parameterization (as implemented)

The coefficient vector is arranged as:

  • w[:M]: feedback (pole) coefficients (often denoted a)
  • w[M:]: feedforward (zero) coefficients (often denoted b)

OE output and error (as implemented)

With reg_x = [x(k), x(k-1), ..., x(k-N)]^T and an internal buffer of the last M outputs, the code forms:

$$\varphi(k) = [y(k-1), \ldots, y(k-M),\; x(k), \ldots, x(k-N)]^T,$$

computes:

$$y(k) = w^T(k)\, \varphi(k), \qquad e(k) = d(k) - y(k),$$

and reports e(k) as the output-error sequence.

Sensitivity vector and RLS recursion

Filtered sensitivity signals stored in internal buffers (x_line_buffer and y_line_buffer) are used to build:

$$\phi(k) = [\underline{y}(k-1), \ldots, \underline{y}(k-M),\; -\underline{x}(k), \ldots, -\underline{x}(k-N)]^T.$$

The inverse correlation matrix Sd is updated in an RLS-like manner:

$$\psi(k) = Sd(k-1)\, \phi(k), \quad \text{den}(k) = \lambda + \phi^T(k)\, \psi(k),$$

$$Sd(k) = \frac{1}{\lambda} \left(Sd(k-1) - \frac{\psi(k)\psi^T(k)}{\text{den}(k)}\right).$$

The coefficient update used here is:

$$w(k+1) = w(k) - Sd(k)\, \phi(k)\, e(k).$$

(Note: this implementation does not expose an additional step-size parameter; the effective step is governed by Sd.)

Stability procedure

After each update, the feedback coefficients w[:M] are stabilized by reflecting poles outside the unit circle back inside (pole reflection).

References

[1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 3rd ed., Algorithm 10.1 (modified).

RLSIIR( zeros_order: int, poles_order: int, forgetting_factor: float = 0.99, delta: float = 0.001, w_init: Union[numpy.ndarray, list, NoneType] = None)
135    def __init__(
136        self,
137        zeros_order: int,
138        poles_order: int,
139        forgetting_factor: float = 0.99,
140        delta: float = 1e-3,
141        w_init: Optional[Union[np.ndarray, list]] = None,
142    ) -> None:
143        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
144
145        self.zeros_order = int(zeros_order)
146        self.poles_order = int(poles_order)
147        self.forgetting_factor = float(forgetting_factor)
148        self.delta = float(delta)
149
150        self.n_coeffs = int(self.zeros_order + self.poles_order + 1)
151        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
152
153        self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64)
154
155        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
156
157        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
158        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
159        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
supports_complex: bool = False
zeros_order: int
poles_order: int
forgetting_factor: float
delta: float
n_coeffs: int
Sd: numpy.ndarray
y_buffer: numpy.ndarray
x_line_buffer: numpy.ndarray
y_line_buffer: numpy.ndarray
w
@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
174    @ensure_real_signals
175    @validate_input
176    def optimize(
177        self,
178        input_signal: np.ndarray,
179        desired_signal: np.ndarray,
180        verbose: bool = False,
181        return_internal_states: bool = False,
182    ) -> OptimizationResult:
183        """
184        Executes the RLS-IIR (OE) adaptation loop.
185
186        Parameters
187        ----------
188        input_signal : array_like of float
189            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
190        desired_signal : array_like of float
191            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
192            Must have the same length as ``input_signal``.
193        verbose : bool, optional
194            If True, prints the total runtime after completion.
195        return_internal_states : bool, optional
196            If True, includes sensitivity trajectories in ``result.extra``:
197            - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
198              scalar sensitivity signal :math:`\\underline{x}(k)`.
199            - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
200              scalar sensitivity signal :math:`\\underline{y}(k)`.
201
202        Returns
203        -------
204        OptimizationResult
205            Result object with fields:
206            - outputs : ndarray of float, shape ``(N,)``
207                Output sequence ``y[k]`` produced by the current IIR structure.
208            - errors : ndarray of float, shape ``(N,)``
209                Output error sequence ``e[k] = d[k] - y[k]``.
210            - coefficients : ndarray of float
211                Coefficient history recorded by the base class.
212            - error_type : str
213                Set to ``"output_error"``.
214            - extra : dict
215                Empty unless ``return_internal_states=True``.
216        """
217        tic: float = time()
218
219        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
220        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
221        n_samples: int = int(x.size)
222
223        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
224        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
225
226        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
227        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
228
229        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
230        x_padded[self.zeros_order:] = x
231
232        
233
234        for k in range(n_samples):
235            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
236            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))
237
238            y_k: float = float(np.dot(self.w, regressor))
239            outputs[k] = y_k
240            e_k: float = float(d[k] - y_k)
241            errors[k] = e_k
242
243            a_coeffs: np.ndarray = self.w[: self.poles_order]
244            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))
245
246            y_line_k: float = 0.0
247            if self.poles_order > 0:
248                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
249                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))
250
251            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
252            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))
253
254            if return_internal_states and x_line_track is not None:
255                x_line_track[k], y_line_track[k] = x_line_k, y_line_k
256
257            phi: np.ndarray = np.concatenate(
258                (
259                    self.y_line_buffer[: self.poles_order],
260                    -self.x_line_buffer[: self.zeros_order + 1],
261                )
262            )
263
264            psi: np.ndarray = self.Sd @ phi
265            den: float = float(self.forgetting_factor + phi.T @ psi)
266            
267            self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)
268
269            self.w -= (self.Sd @ phi) * e_k
270
271            if self.poles_order > 0:
272                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
273                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))
274
275            self._record_history()
276
277        runtime_s: float = float(time() - tic)
278        if verbose:
279            print(f"[RLSIIR] Completed in {runtime_s * 1000:.02f} ms")
280
281        extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}
282
283        return self._pack_results(
284            outputs=outputs,
285            errors=errors,
286            runtime_s=runtime_s,
287            error_type="output_error",
288            extra=extra,
289        )

Executes the RLS-IIR (OE) adaptation loop.

Parameters

input_signal : array_like of float
    Real-valued input sequence x[k] with shape (N,).
desired_signal : array_like of float
    Real-valued desired/reference sequence d[k] with shape (N,). Must have the same length as input_signal.
verbose : bool, optional
    If True, prints the total runtime after completion.
return_internal_states : bool, optional
    If True, includes sensitivity trajectories in result.extra:
    - "x_sensitivity": ndarray of float, shape (N,) with the scalar sensitivity signal \( \underline{x}(k) \).
    - "y_sensitivity": ndarray of float, shape (N,) with the scalar sensitivity signal \( \underline{y}(k) \).

Returns

OptimizationResult
    Result object with fields:
    - outputs : ndarray of float, shape (N,) — output sequence y[k] produced by the current IIR structure.
    - errors : ndarray of float, shape (N,) — output error sequence e[k] = d[k] - y[k].
    - coefficients : ndarray of float — coefficient history recorded by the base class.
    - error_type : str — set to "output_error".
    - extra : dict — empty unless return_internal_states=True.

class SteiglitzMcBride(pydaptivefiltering.AdaptiveFilter):
 26class SteiglitzMcBride(AdaptiveFilter):
 27    """
 28    Steiglitz–McBride (SM) adaptive algorithm for IIR filters (real-valued).
 29
 30    The Steiglitz–McBride method is an iterative output-error (OE) approach
 31    implemented via a sequence of *prefiltered equation-error* updates. The key
 32    idea is to prefilter both the input ``x[k]`` and the desired signal ``d[k]``
 33    by the inverse of the current denominator estimate, :math:`1/A(z)`. This
 34    transforms the OE problem into a (locally) more linear regression and often
 35    improves convergence compared to directly minimizing the OE surface.
 36
 37    This implementation follows the structure of Diniz (3rd ed., Alg. 10.4),
 38    using per-sample prefiltering recursions and a gradient-type update driven
 39    by the *filtered equation error*. It is restricted to **real-valued**
 40    signals (enforced by ``ensure_real_signals``).
 41
 42    Parameters
 43    ----------
 44    zeros_order : int
 45        Numerator order ``N`` (number of zeros). The feedforward part has
 46        ``N + 1`` coefficients.
 47    poles_order : int
 48        Denominator order ``M`` (number of poles). The feedback part has ``M``
 49        coefficients.
 50    step_size : float, optional
 51        Adaptation step size ``mu`` for the SM update. Default is 1e-3.
 52    w_init : array_like of float, optional
 53        Optional initial coefficient vector. If provided, it should have shape
 54        ``(M + N + 1,)`` following the parameter order described below. If None,
 55        the implementation initializes with zeros (and ignores ``w_init``).
 56
 57    Notes
 58    -----
 59    Parameterization (as implemented)
 60    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 61    The coefficient vector is arranged as:
 62
 63    - ``w[:M]``: feedback (pole) coefficients (often denoted ``a``)
 64    - ``w[M:]``: feedforward (zero) coefficients (often denoted ``b``)
 65
 66    "True" IIR output and output error
 67    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 68    With ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T`` and an internal buffer of the
 69    last ``M`` outputs, the method computes a "true IIR" output:
 70
 71    .. math::
 72        y(k) = w^T(k)\\, [y(k-1),\\ldots,y(k-M),\\; x(k),\\ldots,x(k-N)]^T,
 73
 74    and the reported output error:
 75
 76    .. math::
 77        e(k) = d(k) - y(k).
 78
 79    Prefiltering by 1/A(z) (as implemented)
 80    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 81    Let ``a`` be the current feedback coefficient vector. The code implements
 82    the prefilter :math:`1/A(z)` through the recursions:
 83
 84    .. math::
 85        x_f(k) = x(k) + a^T x_f(k-1:k-M), \\qquad
 86        d_f(k) = d(k) + a^T d_f(k-1:k-M),
 87
 88    where the past filtered values are stored in ``xf_buffer`` and ``df_buffer``.
 89
 90    Filtered equation error and update
 91    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 92    The adaptation uses an auxiliary regressor built from the filtered signals
 93    (named ``regressor_s`` in the code). For ``M > 0``:
 94
 95    .. math::
 96        \\varphi_s(k) = [d_f(k-1),\\ldots,d_f(k-M),\\; x_f(k),\\ldots,x_f(k-N)]^T,
 97
 98    and for ``M = 0`` it reduces to the FIR case using only
 99    ``[x_f(k),\\ldots,x_f(k-N)]``.
100
101    The filtered equation error is:
102
103    .. math::
104        e_s(k) = d_f(k) - w^T(k)\\, \\varphi_s(k),
105
106    and the coefficient update used here is:
107
108    .. math::
109        w(k+1) = w(k) + 2\\mu\\, \\varphi_s(k)\\, e_s(k).
110
111    Stability procedure
112    ~~~~~~~~~~~~~~~~~~~
113    After each update (for ``M > 0``), the feedback coefficients ``w[:M]`` are
114    stabilized by reflecting poles outside the unit circle back inside (pole
115    reflection). This helps keep the prefilter :math:`1/A(z)` stable.
116
117    References
118    ----------
119    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
120       Implementation*, 3rd ed., Algorithm 10.4.
121    """
122
123    supports_complex: bool = False
124    zeros_order: int
125    poles_order: int
126    step_size: float
127    n_coeffs: int
128    y_buffer: np.ndarray
129    xf_buffer: np.ndarray
130    df_buffer: np.ndarray
131
132    def __init__(
133        self,
134        zeros_order: int,
135        poles_order: int,
136        step_size: float = 1e-3,
137        w_init: Optional[Union[np.ndarray, list]] = None,
138    ) -> None:
139        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
140
141        self.zeros_order = int(zeros_order)
142        self.poles_order = int(poles_order)
143        self.step_size = float(step_size)
144
145        self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)
146        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
147
148        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
149
150        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order + 1))
151        self.xf_buffer = np.zeros(max_buffer, dtype=np.float64)
152        self.df_buffer = np.zeros(max_buffer, dtype=np.float64)
153
154    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
155        """
156        Reflects poles outside the unit circle back inside to ensure 
157        the prefilter $1/A(z)$ remains stable.
158        """
159        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
160        poles: np.ndarray = np.roots(poly_coeffs)
161        mask: np.ndarray = np.abs(poles) > 1.0
162        if np.any(mask):
163            poles[mask] = 1.0 / np.conj(poles[mask])
164            new_poly: np.ndarray = np.poly(poles)
165            return -np.real(new_poly[1:])
166        return a_coeffs
167
168    @ensure_real_signals
169    @validate_input
170    def optimize(
171        self,
172        input_signal: np.ndarray,
173        desired_signal: np.ndarray,
174        verbose: bool = False,
175        return_internal_states: bool = False,
176    ) -> OptimizationResult:
177        """
178        Executes the Steiglitz–McBride adaptation loop.
179
180        Parameters
181        ----------
182        input_signal : array_like of float
183            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
184        desired_signal : array_like of float
185            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
186            Must have the same length as ``input_signal``.
187        verbose : bool, optional
188            If True, prints the total runtime after completion.
189        return_internal_states : bool, optional
190            If True, includes the filtered equation-error trajectory in
191            ``result.extra["auxiliary_error"]`` with shape ``(N,)``.
192
193        Returns
194        -------
195        OptimizationResult
196            Result object with fields:
197            - outputs : ndarray of float, shape ``(N,)``
198                "True IIR" output sequence ``y[k]``.
199            - errors : ndarray of float, shape ``(N,)``
200                Output error sequence ``e[k] = d[k] - y[k]``.
201            - coefficients : ndarray of float
202                Coefficient history recorded by the base class.
203            - error_type : str
204                Set to ``"a_posteriori"`` (the update is driven by the filtered
205                equation error).
206            - extra : dict
207                Empty unless ``return_internal_states=True``.
208        """
209        tic: float = time()
210
211        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
212        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
213        n_samples: int = int(x.size)
214
215        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
216        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
217        errors_s: np.ndarray = np.zeros(n_samples, dtype=np.float64)
218
219        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
220        x_padded[self.zeros_order:] = x
221
222        for k in range(n_samples):
223            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
224            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))
225
226            y_k: float = float(np.dot(self.w, regressor))
227            outputs[k] = y_k
228            errors[k] = float(d[k] - y_k)
229
230            a_coeffs: np.ndarray = self.w[: self.poles_order]
231
232            xf_k: float = float(x[k] + np.dot(a_coeffs, self.xf_buffer[: self.poles_order]))
233            df_k: float = float(d[k] + np.dot(a_coeffs, self.df_buffer[: self.poles_order]))
234
235            self.xf_buffer = np.concatenate(([xf_k], self.xf_buffer[:-1]))
236            self.df_buffer = np.concatenate(([df_k], self.df_buffer[:-1]))
237
238            if self.poles_order == 0:
239                regressor_s: np.ndarray = self.xf_buffer[: self.zeros_order + 1]
240            else:
241                regressor_s = np.concatenate(
242                    (
243                        self.df_buffer[1 : self.poles_order + 1],
244                        self.xf_buffer[: self.zeros_order + 1],
245                    )
246                )
247
248            e_s_k: float = float(df_k - np.dot(self.w, regressor_s))
249            errors_s[k] = e_s_k
250
251            self.w += 2.0 * self.step_size * regressor_s * e_s_k
252
253            if self.poles_order > 0:
254                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
255                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))
256
257            self._record_history()
258
259        runtime_s: float = float(time() - tic)
260        if verbose:
261            print(f"[SteiglitzMcBride] Completed in {runtime_s * 1000:.02f} ms")
262
263        extra = {"auxiliary_error": errors_s} if return_internal_states else {}
264
265        return self._pack_results(
266            outputs=outputs,
267            errors=errors,
268            runtime_s=runtime_s,
269            error_type="a_posteriori",
270            extra=extra,
271        )

Steiglitz–McBride (SM) adaptive algorithm for IIR filters (real-valued).

The Steiglitz–McBride method is an iterative output-error (OE) approach implemented via a sequence of prefiltered equation-error updates. The key idea is to prefilter both the input x[k] and the desired signal d[k] by the inverse of the current denominator estimate, \( 1/A(z) \). This transforms the OE problem into a (locally) more linear regression and often improves convergence compared to directly minimizing the OE surface.

This implementation follows the structure of Diniz (3rd ed., Alg. 10.4), using per-sample prefiltering recursions and a gradient-type update driven by the filtered equation error. It is restricted to real-valued signals (enforced by ensure_real_signals).

Parameters

zeros_order : int Numerator order N (number of zeros). The feedforward part has N + 1 coefficients. poles_order : int Denominator order M (number of poles). The feedback part has M coefficients. step_size : float, optional Adaptation step size mu for the SM update. Default is 1e-3. w_init : array_like of float, optional Optional initial coefficient vector. If provided, it should have shape (M + N + 1,) following the parameter order described below. If None, the implementation initializes with zeros (and ignores w_init).

Notes

Parameterization (as implemented). The coefficient vector is arranged as:

  • w[:M]: feedback (pole) coefficients (often denoted a)
  • w[M:]: feedforward (zero) coefficients (often denoted b)

"True" IIR output and output error. With reg_x = [x(k), x(k-1), ..., x(k-N)]^T and an internal buffer of the last M outputs, the method computes a "true IIR" output:

$$y(k) = w^T(k)\, [y(k-1),\ldots,y(k-M),\; x(k),\ldots,x(k-N)]^T,$$

and the reported output error:

$$e(k) = d(k) - y(k).$$

Prefiltering by 1/A(z) (as implemented). Let a be the current feedback coefficient vector. The code implements the prefilter \( 1/A(z) \) through the recursions:

$$x_f(k) = x(k) + a^T x_f(k-1:k-M), \qquad d_f(k) = d(k) + a^T d_f(k-1:k-M),$$

where the past filtered values are stored in xf_buffer and df_buffer.

Filtered equation error and update. The adaptation uses an auxiliary regressor built from the filtered signals (named regressor_s in the code). For M > 0:

$$\varphi_s(k) = [d_f(k-1),\ldots,d_f(k-M),\; x_f(k),\ldots,x_f(k-N)]^T,$$

and for M = 0 it reduces to the FIR case using only \( [x_f(k),\ldots,x_f(k-N)] \).

The filtered equation error is:

$$e_s(k) = d_f(k) - w^T(k)\, \varphi_s(k),$$

and the coefficient update used here is:

$$w(k+1) = w(k) + 2\mu\, \varphi_s(k)\, e_s(k).$$

Stability procedure. After each update (for M > 0), the feedback coefficients w[:M] are stabilized by reflecting poles outside the unit circle back inside (pole reflection). This helps keep the prefilter \( 1/A(z) \) stable.

References


SteiglitzMcBride( zeros_order: int, poles_order: int, step_size: float = 0.001, w_init: Union[numpy.ndarray, list, NoneType] = None)
132    def __init__(
133        self,
134        zeros_order: int,
135        poles_order: int,
136        step_size: float = 1e-3,
137        w_init: Optional[Union[np.ndarray, list]] = None,
138    ) -> None:
139        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)
140
141        self.zeros_order = int(zeros_order)
142        self.poles_order = int(poles_order)
143        self.step_size = float(step_size)
144
145        self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)
146        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
147
148        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
149
150        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order + 1))
151        self.xf_buffer = np.zeros(max_buffer, dtype=np.float64)
152        self.df_buffer = np.zeros(max_buffer, dtype=np.float64)
supports_complex: bool = False
zeros_order: int
poles_order: int
step_size: float
n_coeffs: int
y_buffer: numpy.ndarray
xf_buffer: numpy.ndarray
df_buffer: numpy.ndarray
w
@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
168    @ensure_real_signals
169    @validate_input
170    def optimize(
171        self,
172        input_signal: np.ndarray,
173        desired_signal: np.ndarray,
174        verbose: bool = False,
175        return_internal_states: bool = False,
176    ) -> OptimizationResult:
177        """
178        Executes the Steiglitz–McBride adaptation loop.
179
180        Parameters
181        ----------
182        input_signal : array_like of float
183            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
184        desired_signal : array_like of float
185            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
186            Must have the same length as ``input_signal``.
187        verbose : bool, optional
188            If True, prints the total runtime after completion.
189        return_internal_states : bool, optional
190            If True, includes the filtered equation-error trajectory in
191            ``result.extra["auxiliary_error"]`` with shape ``(N,)``.
192
193        Returns
194        -------
195        OptimizationResult
196            Result object with fields:
197            - outputs : ndarray of float, shape ``(N,)``
198                "True IIR" output sequence ``y[k]``.
199            - errors : ndarray of float, shape ``(N,)``
200                Output error sequence ``e[k] = d[k] - y[k]``.
201            - coefficients : ndarray of float
202                Coefficient history recorded by the base class.
203            - error_type : str
204                Set to ``"a_posteriori"`` (the update is driven by the filtered
205                equation error).
206            - extra : dict
207                Empty unless ``return_internal_states=True``.
208        """
209        tic: float = time()
210
211        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
212        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
213        n_samples: int = int(x.size)
214
215        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
216        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
217        errors_s: np.ndarray = np.zeros(n_samples, dtype=np.float64)
218
219        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
220        x_padded[self.zeros_order:] = x
221
222        for k in range(n_samples):
223            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
224            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))
225
226            y_k: float = float(np.dot(self.w, regressor))
227            outputs[k] = y_k
228            errors[k] = float(d[k] - y_k)
229
230            a_coeffs: np.ndarray = self.w[: self.poles_order]
231
232            xf_k: float = float(x[k] + np.dot(a_coeffs, self.xf_buffer[: self.poles_order]))
233            df_k: float = float(d[k] + np.dot(a_coeffs, self.df_buffer[: self.poles_order]))
234
235            self.xf_buffer = np.concatenate(([xf_k], self.xf_buffer[:-1]))
236            self.df_buffer = np.concatenate(([df_k], self.df_buffer[:-1]))
237
238            if self.poles_order == 0:
239                regressor_s: np.ndarray = self.xf_buffer[: self.zeros_order + 1]
240            else:
241                regressor_s = np.concatenate(
242                    (
243                        self.df_buffer[1 : self.poles_order + 1],
244                        self.xf_buffer[: self.zeros_order + 1],
245                    )
246                )
247
248            e_s_k: float = float(df_k - np.dot(self.w, regressor_s))
249            errors_s[k] = e_s_k
250
251            self.w += 2.0 * self.step_size * regressor_s * e_s_k
252
253            if self.poles_order > 0:
254                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
255                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))
256
257            self._record_history()
258
259        runtime_s: float = float(time() - tic)
260        if verbose:
261            print(f"[SteiglitzMcBride] Completed in {runtime_s * 1000:.02f} ms")
262
263        extra = {"auxiliary_error": errors_s} if return_internal_states else {}
264
265        return self._pack_results(
266            outputs=outputs,
267            errors=errors,
268            runtime_s=runtime_s,
269            error_type="a_posteriori",
270            extra=extra,
271        )

Executes the Steiglitz–McBride adaptation loop.

Parameters

input_signal : array_like of float Real-valued input sequence x[k] with shape (N,). desired_signal : array_like of float Real-valued desired/reference sequence d[k] with shape (N,). Must have the same length as input_signal. verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes the filtered equation-error trajectory in result.extra["auxiliary_error"] with shape (N,).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of float, shape (N,) "True IIR" output sequence y[k]. - errors : ndarray of float, shape (N,) Output error sequence e[k] = d[k] - y[k]. - coefficients : ndarray of float Coefficient history recorded by the base class. - error_type : str Set to "a_posteriori" (the update is driven by the filtered equation error). - extra : dict Empty unless return_internal_states=True.

class BilinearRLS(pydaptivefiltering.AdaptiveFilter):
 29class BilinearRLS(AdaptiveFilter):
 30    """
 31    Bilinear RLS adaptive filter (real-valued).
 32
 33    RLS algorithm with a fixed 4-dimensional *bilinear* regressor structure,
 34    following Diniz (Alg. 11.3). The regressor couples the current input with
 35    past desired samples to model a simple bilinear relationship.
 36
 37    Parameters
 38    ----------
 39    forgetting_factor : float, optional
 40        Forgetting factor ``lambda`` with ``0 < lambda <= 1``. Default is 0.98.
 41    delta : float, optional
 42        Regularization parameter used to initialize the inverse correlation
 43        matrix as ``P(0) = I/delta`` (requires ``delta > 0``). Default is 1.0.
 44    w_init : array_like of float, optional
 45        Initial coefficient vector ``w(0)`` with shape ``(4,)``. If None,
 46        initializes with zeros.
 47    safe_eps : float, optional
 48        Small positive constant used to guard denominators. Default is 1e-12.
 49
 50    Notes
 51    -----
 52    Real-valued only
 53        This implementation is restricted to real-valued signals and coefficients
 54        (``supports_complex=False``). The constraint is enforced via
 55        ``@ensure_real_signals`` on :meth:`optimize`.
 56
 57    Bilinear regressor (as implemented)
 58        This implementation uses a 4-component regressor:
 59
 60        .. math::
 61            u[k] =
 62            \\begin{bmatrix}
 63                x[k] \\\\
 64                d[k-1] \\\\
 65                x[k]d[k-1] \\\\
 66                x[k-1]d[k-1]
 67            \\end{bmatrix}
 68            \\in \\mathbb{R}^{4}.
 69
 70        The state ``x[k-1]`` and ``d[k-1]`` are taken from the previous iteration,
 71        with ``x[-1] = 0`` and ``d[-1] = 0`` at initialization.
 72
 73    RLS recursion (a priori form)
 74        With
 75
 76        .. math::
 77            y[k] = w^T[k-1] u[k], \\qquad e[k] = d[k] - y[k],
 78
 79        the gain vector is
 80
 81        .. math::
 82            g[k] = \\frac{P[k-1] u[k]}{\\lambda + u^T[k] P[k-1] u[k]},
 83
 84        the inverse correlation update is
 85
 86        .. math::
 87            P[k] = \\frac{1}{\\lambda}\\left(P[k-1] - g[k] u^T[k] P[k-1]\\right),
 88
 89        and the coefficient update is
 90
 91        .. math::
 92            w[k] = w[k-1] + g[k] e[k].
 93
 94    Implementation details
 95        - The denominator ``lambda + u^T P u`` is guarded by ``safe_eps`` to avoid
 96          numerical issues when very small.
 97        - Coefficient history is recorded via the base class.
 98
 99    References
100    ----------
101    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
102       Implementation*, 5th ed., Algorithm 11.3.
103    """
104
105    supports_complex: bool = False
106
107    def __init__(
108        self,
109        forgetting_factor: float = 0.98,
110        delta: float = 1.0,
111        w_init: Optional[ArrayLike] = None,
112        *,
113        safe_eps: float = 1e-12,
114    ) -> None:
115        n_coeffs = 4
116        super().__init__(filter_order=n_coeffs - 1, w_init=w_init)
117
118        self.lambda_factor = float(forgetting_factor)
119        if not (0.0 < self.lambda_factor <= 1.0):
120            raise ValueError(
121                f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got {self.lambda_factor}."
122            )
123
124        self.delta = float(delta)
125        if self.delta <= 0.0:
126            raise ValueError(f"delta must be > 0. Got delta={self.delta}.")
127
128        self._safe_eps = float(safe_eps)
129
130        self.P = np.eye(n_coeffs, dtype=np.float64) / self.delta
131
132    @validate_input
133    @ensure_real_signals
134    def optimize(
135        self,
136        input_signal: np.ndarray,
137        desired_signal: np.ndarray,
138        verbose: bool = False,
139        return_internal_states: bool = False,
140    ) -> OptimizationResult:
141        """
142        Executes the bilinear RLS adaptation loop over paired input/desired sequences.
143
144        Parameters
145        ----------
146        input_signal : array_like of float
147            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
148        desired_signal : array_like of float
149            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
150        verbose : bool, optional
151            If True, prints the total runtime after completion.
152        return_internal_states : bool, optional
153            If True, includes the last internal states in ``result.extra``:
154            ``"P_last"``, ``"last_regressor"`` (``u[k]``), and ``"last_gain"`` (``g[k]``).
155
156        Returns
157        -------
158        OptimizationResult
159            Result object with fields:
160            - outputs : ndarray of float, shape ``(N,)``
161                Scalar a priori output sequence, ``y[k] = w^T[k-1] u[k]``.
162            - errors : ndarray of float, shape ``(N,)``
163                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
164            - coefficients : ndarray of float
165                Coefficient history recorded by the base class.
166            - error_type : str
167                Set to ``"a_priori"``.
168            - extra : dict, optional
169                Present only if ``return_internal_states=True``.
170        """
171        t0 = perf_counter()
172
173        x = np.asarray(input_signal, dtype=np.float64).ravel()
174        d = np.asarray(desired_signal, dtype=np.float64).ravel()
175
176        n_samples = int(x.size)
177        outputs = np.zeros(n_samples, dtype=np.float64)
178        errors = np.zeros(n_samples, dtype=np.float64)
179
180        x_prev = 0.0
181        d_prev = 0.0
182
183        last_u: Optional[np.ndarray] = None
184        last_k: Optional[np.ndarray] = None
185
186        for k in range(n_samples):
187            u = np.array(
188                [x[k], d_prev, x[k] * d_prev, x_prev * d_prev],
189                dtype=np.float64,
190            )
191            last_u = u
192
193            y_k = float(np.dot(self.w, u))
194            outputs[k] = y_k
195
196            e_k = float(d[k] - y_k)
197            errors[k] = e_k
198
199            Pu = self.P @ u
200            denom = float(self.lambda_factor + (u @ Pu))
201            if abs(denom) < self._safe_eps:
202                denom = float(np.sign(denom) * self._safe_eps) if denom != 0.0 else float(self._safe_eps)
203
204            k_gain = Pu / denom
205            last_k = k_gain
206
207            self.P = (self.P - np.outer(k_gain, Pu)) / self.lambda_factor
208
209            self.w = self.w + k_gain * e_k
210            self._record_history()
211
212            x_prev = float(x[k])
213            d_prev = float(d[k])
214
215        runtime_s = float(perf_counter() - t0)
216        if verbose:
217            print(f"[BilinearRLS] Completed in {runtime_s * 1000:.03f} ms")
218
219        extra: Optional[Dict[str, Any]] = None
220        if return_internal_states:
221            extra = {
222                "P_last": self.P.copy(),
223                "last_regressor": None if last_u is None else last_u.copy(),
224                "last_gain": None if last_k is None else last_k.copy(),
225            }
226
227        return self._pack_results(
228            outputs=outputs,
229            errors=errors,
230            runtime_s=runtime_s,
231            error_type="a_priori",
232            extra=extra,
233        )

Bilinear RLS adaptive filter (real-valued).

RLS algorithm with a fixed 4-dimensional bilinear regressor structure, following Diniz (Alg. 11.3). The regressor couples the current input with past desired samples to model a simple bilinear relationship.

Parameters

forgetting_factor : float, optional Forgetting factor lambda with 0 < lambda <= 1. Default is 0.98. delta : float, optional Regularization parameter used to initialize the inverse correlation matrix as P(0) = I/delta (requires delta > 0). Default is 1.0. w_init : array_like of float, optional Initial coefficient vector w(0) with shape (4,). If None, initializes with zeros. safe_eps : float, optional Small positive constant used to guard denominators. Default is 1e-12.

Notes

Real-valued only This implementation is restricted to real-valued signals and coefficients (supports_complex=False). The constraint is enforced via @ensure_real_signals on optimize().

Bilinear regressor (as implemented) This implementation uses a 4-component regressor:

$$u[k] = \begin{bmatrix} x[k] \\ d[k-1] \\ x[k]d[k-1] \\ x[k-1]d[k-1] \end{bmatrix} \in \mathbb{R}^{4}.$$

The states x[k-1] and d[k-1] are taken from the previous iteration, with x[-1] = 0 and d[-1] = 0 at initialization.

RLS recursion (a priori form) With

$$y[k] = w^T[k-1] u[k], \qquad e[k] = d[k] - y[k],$$

the gain vector is

$$g[k] = \frac{P[k-1] u[k]}{\lambda + u^T[k] P[k-1] u[k]},$$

the inverse correlation update is

$$P[k] = \frac{1}{\lambda}\left(P[k-1] - g[k] u^T[k] P[k-1]\right),$$

and the coefficient update is

$$w[k] = w[k-1] + g[k] e[k].$$

Implementation details: the denominator lambda + u^T P u is guarded by safe_eps to avoid numerical issues when it becomes very small, and the coefficient history is recorded via the base class.

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 11.3.
BilinearRLS( forgetting_factor: float = 0.98, delta: float = 1.0, w_init: Union[numpy.ndarray, list, NoneType] = None, *, safe_eps: float = 1e-12)
107    def __init__(
108        self,
109        forgetting_factor: float = 0.98,
110        delta: float = 1.0,
111        w_init: Optional[ArrayLike] = None,
112        *,
113        safe_eps: float = 1e-12,
114    ) -> None:
115        n_coeffs = 4
116        super().__init__(filter_order=n_coeffs - 1, w_init=w_init)
117
118        self.lambda_factor = float(forgetting_factor)
119        if not (0.0 < self.lambda_factor <= 1.0):
120            raise ValueError(
121                f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got {self.lambda_factor}."
122            )
123
124        self.delta = float(delta)
125        if self.delta <= 0.0:
126            raise ValueError(f"delta must be > 0. Got delta={self.delta}.")
127
128        self._safe_eps = float(safe_eps)
129
130        self.P = np.eye(n_coeffs, dtype=np.float64) / self.delta
supports_complex: bool = False
lambda_factor
delta
P
@validate_input
@ensure_real_signals
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
132    @validate_input
133    @ensure_real_signals
134    def optimize(
135        self,
136        input_signal: np.ndarray,
137        desired_signal: np.ndarray,
138        verbose: bool = False,
139        return_internal_states: bool = False,
140    ) -> OptimizationResult:
141        """
142        Executes the bilinear RLS adaptation loop over paired input/desired sequences.
143
144        Parameters
145        ----------
146        input_signal : array_like of float
147            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
148        desired_signal : array_like of float
149            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
150        verbose : bool, optional
151            If True, prints the total runtime after completion.
152        return_internal_states : bool, optional
153            If True, includes the last internal states in ``result.extra``:
154            ``"P_last"``, ``"last_regressor"`` (``u[k]``), and ``"last_gain"`` (``g[k]``).
155
156        Returns
157        -------
158        OptimizationResult
159            Result object with fields:
160            - outputs : ndarray of float, shape ``(N,)``
161                Scalar a priori output sequence, ``y[k] = w^T[k-1] u[k]``.
162            - errors : ndarray of float, shape ``(N,)``
163                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
164            - coefficients : ndarray of float
165                Coefficient history recorded by the base class.
166            - error_type : str
167                Set to ``"a_priori"``.
168            - extra : dict, optional
169                Present only if ``return_internal_states=True``.
170        """
171        t0 = perf_counter()
172
173        x = np.asarray(input_signal, dtype=np.float64).ravel()
174        d = np.asarray(desired_signal, dtype=np.float64).ravel()
175
176        n_samples = int(x.size)
177        outputs = np.zeros(n_samples, dtype=np.float64)
178        errors = np.zeros(n_samples, dtype=np.float64)
179
180        x_prev = 0.0
181        d_prev = 0.0
182
183        last_u: Optional[np.ndarray] = None
184        last_k: Optional[np.ndarray] = None
185
186        for k in range(n_samples):
187            u = np.array(
188                [x[k], d_prev, x[k] * d_prev, x_prev * d_prev],
189                dtype=np.float64,
190            )
191            last_u = u
192
193            y_k = float(np.dot(self.w, u))
194            outputs[k] = y_k
195
196            e_k = float(d[k] - y_k)
197            errors[k] = e_k
198
199            Pu = self.P @ u
200            denom = float(self.lambda_factor + (u @ Pu))
201            if abs(denom) < self._safe_eps:
202                denom = float(np.sign(denom) * self._safe_eps) if denom != 0.0 else float(self._safe_eps)
203
204            k_gain = Pu / denom
205            last_k = k_gain
206
207            self.P = (self.P - np.outer(k_gain, Pu)) / self.lambda_factor
208
209            self.w = self.w + k_gain * e_k
210            self._record_history()
211
212            x_prev = float(x[k])
213            d_prev = float(d[k])
214
215        runtime_s = float(perf_counter() - t0)
216        if verbose:
217            print(f"[BilinearRLS] Completed in {runtime_s * 1000:.03f} ms")
218
219        extra: Optional[Dict[str, Any]] = None
220        if return_internal_states:
221            extra = {
222                "P_last": self.P.copy(),
223                "last_regressor": None if last_u is None else last_u.copy(),
224                "last_gain": None if last_k is None else last_k.copy(),
225            }
226
227        return self._pack_results(
228            outputs=outputs,
229            errors=errors,
230            runtime_s=runtime_s,
231            error_type="a_priori",
232            extra=extra,
233        )

Executes the bilinear RLS adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of float Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of float Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes the last internal states in result.extra: "P_last", "last_regressor" (u[k]), and "last_gain" (g[k]).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of float, shape (N,) Scalar a priori output sequence, y[k] = w^T[k-1] u[k]. - errors : ndarray of float, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of float Coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True.

class ComplexRBF(pydaptivefiltering.AdaptiveFilter):
 28class ComplexRBF(AdaptiveFilter):
 29    """
 30    Complex Radial Basis Function (CRBF) network (complex-valued).
 31
 32    Implements a complex-valued RBF adaptive model (Algorithm 11.6 - Diniz).
 33    The model output is computed as:
 34
 35        f_p(u) = exp( -||u - c_p||^2 / sigma_p^2 )
 36        y[k]   = w^H f(u_k)
 37
 38    where:
 39      - u_k is the input regressor (dimension = input_dim),
 40      - c_p are complex centers ("vet" in the original code),
 41      - sigma_p are real spreads,
 42      - w are complex neuron weights.
 43
 44    Input handling
 45    --------------
 46    This implementation accepts two input formats in `optimize`:
 47
 48    1) 1D input signal x[k] (shape (N,)):
 49       A tapped-delay regressor u_k of length `input_dim` is formed internally.
 50
 51    2) 2D regressor matrix U (shape (N, input_dim)):
 52       Each row is used directly as u_k.
 53
 54    Notes
 55    -----
 56    - Complex-valued implementation (`supports_complex=True`).
 57    - The base class `filter_order` is used here as a size indicator (n_neurons-1).
 58    - `OptimizationResult.coefficients` stores the history of neuron weights `w`.
 59      Centers and spreads can be returned via `result.extra` when requested.
 60    """
 61
 62    supports_complex: bool = True
 63
 64    def __init__(
 65        self,
 66        n_neurons: int,
 67        input_dim: int,
 68        ur: float = 0.01,
 69        uw: float = 0.01,
 70        us: float = 0.01,
 71        w_init: Optional[ArrayLike] = None,
 72        *,
 73        sigma_init: float = 1.0,
 74        rng: Optional[np.random.Generator] = None,
 75    ) -> None:
 76        """
 77        Parameters
 78        ----------
 79        n_neurons:
 80            Number of RBF neurons.
 81        input_dim:
 82            Dimension of the input regressor u_k.
 83        ur:
 84            Step-size for centers update.
 85        uw:
 86            Step-size for weights update.
 87        us:
 88            Step-size for spread (sigma) update.
 89        w_init:
 90            Optional initial neuron weights (length n_neurons). If None, random complex.
 91        sigma_init:
 92            Initial spread value used for all neurons (must be > 0).
 93        rng:
 94            Optional numpy random generator for reproducible initialization.
 95        """
 96        n_neurons = int(n_neurons)
 97        input_dim = int(input_dim)
 98        if n_neurons <= 0:
 99            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
100        if input_dim <= 0:
101            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
102        if sigma_init <= 0.0:
103            raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")
104
105        # filter_order used as "generic size indicator" (n_neurons-1 => n_neurons taps)
106        super().__init__(filter_order=n_neurons - 1, w_init=None)
107
108        self.n_neurons = n_neurons
109        self.input_dim = input_dim
110        self.ur = float(ur)
111        self.uw = float(uw)
112        self.us = float(us)
113
114        self._rng = rng if rng is not None else np.random.default_rng()
115
116        # weights
117        if w_init is None:
118            w0 = self._rng.standard_normal(n_neurons) + 1j * self._rng.standard_normal(n_neurons)
119            self.w = w0.astype(complex)
120        else:
121            w0 = np.asarray(w_init, dtype=complex).reshape(-1)
122            if w0.size != n_neurons:
123                raise ValueError(f"w_init must have length {n_neurons}, got {w0.size}.")
124            self.w = w0
125
126        # centers (complex), shape (n_neurons, input_dim)
127        self.vet = 0.5 * (
128            self._rng.standard_normal((n_neurons, input_dim))
129            + 1j * self._rng.standard_normal((n_neurons, input_dim))
130        ).astype(complex)
131
132        # spreads (real), shape (n_neurons,)
133        self.sigma = np.ones(n_neurons, dtype=float) * float(sigma_init)
134
135        # reset base history with correct initial w
136        self.w_history = []
137        self._record_history()
138
139    @staticmethod
140    def _build_regressors_from_signal(x: np.ndarray, input_dim: int) -> np.ndarray:
141        """Build tapped-delay regressors from a 1D signal (N,)->(N,input_dim)."""
142        x = np.asarray(x, dtype=complex).ravel()
143        n = int(x.size)
144        m = int(input_dim - 1)
145
146        x_padded = np.zeros(n + m, dtype=complex)
147        x_padded[m:] = x
148
149        U = np.zeros((n, input_dim), dtype=complex)
150        for k in range(n):
151            U[k, :] = x_padded[k : k + input_dim][::-1]
152        return U
153
154    @staticmethod
155    def _squared_distance_complex(u: np.ndarray, centers: np.ndarray) -> np.ndarray:
156        """
157        Compute ||u - c_p||^2 for each center row.
158        u: (input_dim,)
159        centers: (n_neurons, input_dim)
160        returns: (n_neurons,)
161        """
162        diff = u[None, :] - centers
163        # squared Euclidean distance in C^d: sum(Re^2 + Im^2)
164        return np.sum(diff.real**2 + diff.imag**2, axis=1)
165
166    def optimize(
167        self,
168        input_signal: Union[np.ndarray, list],
169        desired_signal: Union[np.ndarray, list],
170        verbose: bool = False,
171        return_internal_states: bool = False,
172        *,
173        safe_eps: float = 1e-12,
174    ) -> OptimizationResult:
175        """
176        Run CRBF adaptation.
177
178        Parameters
179        ----------
180        input_signal:
181            Either:
182              - 1D signal x[k] with shape (N,), or
183              - regressor matrix U with shape (N, input_dim).
184        desired_signal:
185            Desired signal d[k], shape (N,).
186        verbose:
187            If True, prints runtime.
188        return_internal_states:
189            If True, returns final centers/spreads and last activation vector in result.extra.
190        safe_eps:
191            Small epsilon to protect denominators (sigma and other divisions).
192
193        Returns
194        -------
195        OptimizationResult
196            outputs:
197                Model output y[k].
198            errors:
199                A priori error e[k] = d[k] - y[k].
200            coefficients:
201                History of neuron weights w[k] (shape (N+1, n_neurons) in base history).
202            error_type:
203                "a_priori".
204
205        Extra (when return_internal_states=True)
206        --------------------------------------
207        extra["centers_last"]:
208            Final centers array (n_neurons, input_dim).
209        extra["sigma_last"]:
210            Final spreads array (n_neurons,).
211        extra["last_activation"]:
212            Last activation vector f(u_k) (n_neurons,).
213        extra["last_regressor"]:
214            Last regressor u_k (input_dim,).
215        """
216        t0 = perf_counter()
217
218        x_in = np.asarray(input_signal)
219        d = np.asarray(desired_signal, dtype=complex).ravel()
220
221        # Build regressors
222        if x_in.ndim == 1:
223            U = self._build_regressors_from_signal(x_in, self.input_dim)
224        elif x_in.ndim == 2:
225            U = np.asarray(x_in, dtype=complex)
226            if U.shape[1] != self.input_dim:
227                raise ValueError(
228                    f"input_signal has shape {U.shape}, expected second dim input_dim={self.input_dim}."
229                )
230        else:
231            raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")
232
233        N = int(U.shape[0])
234        if d.size != N:
235            raise ValueError(f"Inconsistent lengths: regressors({N}) != desired({d.size}).")
236
237        outputs = np.zeros(N, dtype=complex)
238        errors  = np.zeros(N, dtype=complex)
239
240        last_f: Optional[np.ndarray] = None
241        last_u: Optional[np.ndarray] = None
242
243        for k in range(N):
244            u = U[k, :]
245            last_u = u
246
247            # activations
248            dis_sq = self._squared_distance_complex(u, self.vet)
249            sigma_sq = np.maximum(self.sigma**2, float(safe_eps))
250            f = np.exp(-dis_sq / sigma_sq)
251            last_f = f
252
253            # output and error (a priori)
254            y_k = complex(np.vdot(self.w, f))  # conj(w) @ f
255            outputs[k] = y_k
256            e_k = d[k] - y_k
257            errors[k] = e_k
258
259            # weight update (kept as in your code: 2*uw*e*f)
               # NOTE(review): for y = w^H f, the Wirtinger-gradient LMS step would use
               # conj(e_k) rather than e_k here — confirm against Diniz Alg. 11.6.
260            self.w = self.w + (2.0 * self.uw) * e_k * f
261
262            # sigma update (kept structurally similar, with protections)
263            denom_sigma = np.maximum(self.sigma**3, float(safe_eps))
264            grad_sigma = (
265                (2.0 * self.us)
266                * f
267                * (e_k.real * self.w.real + e_k.imag * self.w.imag)
268                * dis_sq
269                / denom_sigma
270            )
               # NOTE(review): grad_sigma uses self.w already updated this iteration;
               # sequencing deliberately kept as in the reference implementation.
271            self.sigma = self.sigma + grad_sigma
272            self.sigma = np.maximum(self.sigma, float(safe_eps))
273
274            # centers update (vectorized over neurons; same intent as your loop)
               # sigma was updated and clamped just above, so denom_c uses this
               # iteration's new spreads (order matters).
275            denom_c = np.maximum(self.sigma**2, float(safe_eps))
276            term = (e_k.real * self.w.real)[:, None] * (u - self.vet).real + 1j * (
277                (e_k.imag * self.w.imag)[:, None] * (u - self.vet).imag
278            )
279            self.vet = self.vet + (2.0 * self.ur) * (f[:, None] * term) / denom_c[:, None]
280
281            self._record_history()
282
283        runtime_s = float(perf_counter() - t0)
284        if verbose:
285            print(f"[ComplexRBF] Completed in {runtime_s * 1000:.03f} ms")
286
287        extra: Optional[Dict[str, Any]] = None
288        if return_internal_states:
289            extra = {
290                "centers_last": self.vet.copy(),
291                "sigma_last": self.sigma.copy(),
292                "last_activation": None if last_f is None else np.asarray(last_f).copy(),
293                "last_regressor": None if last_u is None else np.asarray(last_u).copy(),
294                "input_dim": int(self.input_dim),
295                "n_neurons": int(self.n_neurons),
296            }
297
298        return self._pack_results(
299            outputs=outputs,
300            errors=errors,
301            runtime_s=runtime_s,
302            error_type="a_priori",
303            extra=extra,
304        )

Complex Radial Basis Function (CRBF) network (complex-valued).

Implements a complex-valued RBF adaptive model (Algorithm 11.6 - Diniz). The model output is computed as:

f_p(u) = exp( -||u - c_p||^2 / sigma_p^2 )
y[k]   = w^H f(u_k)

where:

  • u_k is the input regressor (dimension = input_dim),
  • c_p are complex centers ("vet" in the original code),
  • sigma_p are real spreads,
  • w are complex neuron weights.

Input handling

This implementation accepts two input formats in optimize:

1) 1D input signal x[k] (shape (N,)): A tapped-delay regressor u_k of length input_dim is formed internally.

2) 2D regressor matrix U (shape (N, input_dim)): Each row is used directly as u_k.

Notes

  • Complex-valued implementation (supports_complex=True).
  • The base class filter_order is used here as a size indicator (n_neurons-1).
  • OptimizationResult.coefficients stores the history of neuron weights w. Centers and spreads can be returned via result.extra when requested.
ComplexRBF( n_neurons: int, input_dim: int, ur: float = 0.01, uw: float = 0.01, us: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None, *, sigma_init: float = 1.0, rng: Optional[numpy.random._generator.Generator] = None)
 64    def __init__(
 65        self,
 66        n_neurons: int,
 67        input_dim: int,
 68        ur: float = 0.01,
 69        uw: float = 0.01,
 70        us: float = 0.01,
 71        w_init: Optional[ArrayLike] = None,
 72        *,
 73        sigma_init: float = 1.0,
 74        rng: Optional[np.random.Generator] = None,
 75    ) -> None:
 76        """
 77        Parameters
 78        ----------
 79        n_neurons:
 80            Number of RBF neurons.
 81        input_dim:
 82            Dimension of the input regressor u_k.
 83        ur:
 84            Step-size for centers update.
 85        uw:
 86            Step-size for weights update.
 87        us:
 88            Step-size for spread (sigma) update.
 89        w_init:
 90            Optional initial neuron weights (length n_neurons). If None, random complex.
 91        sigma_init:
 92            Initial spread value used for all neurons (must be > 0).
 93        rng:
 94            Optional numpy random generator for reproducible initialization.
 95        """
 96        n_neurons = int(n_neurons)
 97        input_dim = int(input_dim)
 98        if n_neurons <= 0:
 99            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
100        if input_dim <= 0:
101            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
102        if sigma_init <= 0.0:
103            raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")
104
105        # filter_order used as "generic size indicator" (n_neurons-1 => n_neurons taps)
106        super().__init__(filter_order=n_neurons - 1, w_init=None)
107
108        self.n_neurons = n_neurons
109        self.input_dim = input_dim
110        self.ur = float(ur)
111        self.uw = float(uw)
112        self.us = float(us)
113
114        self._rng = rng if rng is not None else np.random.default_rng()
115
116        # weights
117        if w_init is None:
118            w0 = self._rng.standard_normal(n_neurons) + 1j * self._rng.standard_normal(n_neurons)
119            self.w = w0.astype(complex)
120        else:
121            w0 = np.asarray(w_init, dtype=complex).reshape(-1)
122            if w0.size != n_neurons:
123                raise ValueError(f"w_init must have length {n_neurons}, got {w0.size}.")
               # NOTE(review): when w_init is already a complex ndarray, asarray/reshape
               # may return a view — self.w can alias the caller's array (no copy here).
124            self.w = w0
125
126        # centers (complex), shape (n_neurons, input_dim)
127        self.vet = 0.5 * (
128            self._rng.standard_normal((n_neurons, input_dim))
129            + 1j * self._rng.standard_normal((n_neurons, input_dim))
130        ).astype(complex)
131
132        # spreads (real), shape (n_neurons,)
133        self.sigma = np.ones(n_neurons, dtype=float) * float(sigma_init)
134
135        # reset base history with correct initial w
136        self.w_history = []
137        self._record_history()

Parameters

n_neurons: Number of RBF neurons.
input_dim: Dimension of the input regressor u_k.
ur: Step-size for centers update.
uw: Step-size for weights update.
us: Step-size for spread (sigma) update.
w_init: Optional initial neuron weights (length n_neurons). If None, random complex.
sigma_init: Initial spread value used for all neurons (must be > 0).
rng: Optional numpy random generator for reproducible initialization.

supports_complex: bool = True
n_neurons
input_dim
ur
uw
us
vet
sigma
w_history
def optimize( self, input_signal: Union[numpy.ndarray, list], desired_signal: Union[numpy.ndarray, list], verbose: bool = False, return_internal_states: bool = False, *, safe_eps: float = 1e-12) -> pydaptivefiltering.base.OptimizationResult:
166    def optimize(
167        self,
168        input_signal: Union[np.ndarray, list],
169        desired_signal: Union[np.ndarray, list],
170        verbose: bool = False,
171        return_internal_states: bool = False,
172        *,
173        safe_eps: float = 1e-12,
174    ) -> OptimizationResult:
175        """
176        Run CRBF adaptation.
177
178        Parameters
179        ----------
180        input_signal:
181            Either:
182              - 1D signal x[k] with shape (N,), or
183              - regressor matrix U with shape (N, input_dim).
184        desired_signal:
185            Desired signal d[k], shape (N,).
186        verbose:
187            If True, prints runtime.
188        return_internal_states:
189            If True, returns final centers/spreads and last activation vector in result.extra.
190        safe_eps:
191            Small epsilon to protect denominators (sigma and other divisions).
192
193        Returns
194        -------
195        OptimizationResult
196            outputs:
197                Model output y[k].
198            errors:
199                A priori error e[k] = d[k] - y[k].
200            coefficients:
201                History of neuron weights w[k] (shape (N+1, n_neurons) in base history).
202            error_type:
203                "a_priori".
204
205        Extra (when return_internal_states=True)
206        --------------------------------------
207        extra["centers_last"]:
208            Final centers array (n_neurons, input_dim).
209        extra["sigma_last"]:
210            Final spreads array (n_neurons,).
211        extra["last_activation"]:
212            Last activation vector f(u_k) (n_neurons,).
213        extra["last_regressor"]:
214            Last regressor u_k (input_dim,).
215        """
216        t0 = perf_counter()
217
218        x_in = np.asarray(input_signal)
219        d = np.asarray(desired_signal, dtype=complex).ravel()
220
221        # Build regressors
222        if x_in.ndim == 1:
223            U = self._build_regressors_from_signal(x_in, self.input_dim)
224        elif x_in.ndim == 2:
225            U = np.asarray(x_in, dtype=complex)
226            if U.shape[1] != self.input_dim:
227                raise ValueError(
228                    f"input_signal has shape {U.shape}, expected second dim input_dim={self.input_dim}."
229                )
230        else:
231            raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")
232
233        N = int(U.shape[0])
234        if d.size != N:
235            raise ValueError(f"Inconsistent lengths: regressors({N}) != desired({d.size}).")
236
237        outputs = np.zeros(N, dtype=complex)
238        errors  = np.zeros(N, dtype=complex)
239
240        last_f: Optional[np.ndarray] = None
241        last_u: Optional[np.ndarray] = None
242
243        for k in range(N):
244            u = U[k, :]
245            last_u = u
246
247            # activations
248            dis_sq = self._squared_distance_complex(u, self.vet)
249            sigma_sq = np.maximum(self.sigma**2, float(safe_eps))
250            f = np.exp(-dis_sq / sigma_sq)
251            last_f = f
252
253            # output and error (a priori)
254            y_k = complex(np.vdot(self.w, f))  # conj(w) @ f
255            outputs[k] = y_k
256            e_k = d[k] - y_k
257            errors[k] = e_k
258
259            # weight update (kept as in your code: 2*uw*e*f)
               # NOTE(review): for y = w^H f, the Wirtinger-gradient LMS step would use
               # conj(e_k) rather than e_k here — confirm against Diniz Alg. 11.6.
260            self.w = self.w + (2.0 * self.uw) * e_k * f
261
262            # sigma update (kept structurally similar, with protections)
263            denom_sigma = np.maximum(self.sigma**3, float(safe_eps))
264            grad_sigma = (
265                (2.0 * self.us)
266                * f
267                * (e_k.real * self.w.real + e_k.imag * self.w.imag)
268                * dis_sq
269                / denom_sigma
270            )
               # NOTE(review): grad_sigma uses self.w already updated this iteration;
               # sequencing deliberately kept as in the reference implementation.
271            self.sigma = self.sigma + grad_sigma
272            self.sigma = np.maximum(self.sigma, float(safe_eps))
273
274            # centers update (vectorized over neurons; same intent as your loop)
               # sigma was updated and clamped just above, so denom_c uses this
               # iteration's new spreads (order matters).
275            denom_c = np.maximum(self.sigma**2, float(safe_eps))
276            term = (e_k.real * self.w.real)[:, None] * (u - self.vet).real + 1j * (
277                (e_k.imag * self.w.imag)[:, None] * (u - self.vet).imag
278            )
279            self.vet = self.vet + (2.0 * self.ur) * (f[:, None] * term) / denom_c[:, None]
280
281            self._record_history()
282
283        runtime_s = float(perf_counter() - t0)
284        if verbose:
285            print(f"[ComplexRBF] Completed in {runtime_s * 1000:.03f} ms")
286
287        extra: Optional[Dict[str, Any]] = None
288        if return_internal_states:
289            extra = {
290                "centers_last": self.vet.copy(),
291                "sigma_last": self.sigma.copy(),
292                "last_activation": None if last_f is None else np.asarray(last_f).copy(),
293                "last_regressor": None if last_u is None else np.asarray(last_u).copy(),
294                "input_dim": int(self.input_dim),
295                "n_neurons": int(self.n_neurons),
296            }
297
298        return self._pack_results(
299            outputs=outputs,
300            errors=errors,
301            runtime_s=runtime_s,
302            error_type="a_priori",
303            extra=extra,
304        )

Run CRBF adaptation.

Parameters

input_signal: Either a 1D signal x[k] with shape (N,), or a regressor matrix U with shape (N, input_dim).
desired_signal: Desired signal d[k], shape (N,).
verbose: If True, prints runtime.
return_internal_states: If True, returns final centers/spreads and last activation vector in result.extra.
safe_eps: Small epsilon to protect denominators (sigma and other divisions).

Returns

OptimizationResult with:
outputs: Model output y[k].
errors: A priori error e[k] = d[k] - y[k].
coefficients: History of neuron weights w[k] (shape (N+1, n_neurons) in base history).
error_type: "a_priori".

Extra (when return_internal_states=True)

extra["centers_last"]: Final centers array (n_neurons, input_dim).
extra["sigma_last"]: Final spreads array (n_neurons,).
extra["last_activation"]: Last activation vector f(u_k) (n_neurons,).
extra["last_regressor"]: Last regressor u_k (input_dim,).

class MultilayerPerceptron(pydaptivefiltering.AdaptiveFilter):
 49class MultilayerPerceptron(AdaptiveFilter):
 50    """
 51    Multilayer Perceptron (MLP) adaptive model with momentum (real-valued).
 52
 53    Online adaptation of a 2-hidden-layer feedforward neural network using a
 54    stochastic-gradient update with momentum. The model is treated as an
 55    adaptive nonlinear filter.
 56
 57    The forward pass is:
 58
 59    .. math::
 60        v_1[k] = W_1 u[k] - b_1, \\qquad y_1[k] = \\phi(v_1[k]),
 61
 62    .. math::
 63        v_2[k] = W_2 y_1[k] - b_2, \\qquad y_2[k] = \\phi(v_2[k]),
 64
 65    .. math::
 66        y[k] = w_3^T y_2[k] - b_3,
 67
 68    where ``\\phi`` is either ``tanh`` or ``sigmoid``.
 69
 70    Parameters
 71    ----------
 72    n_neurons : int, optional
 73        Number of neurons in each hidden layer. Default is 10.
 74    input_dim : int, optional
 75        Dimension of the regressor vector ``u[k]``. Default is 3.
 76        If :meth:`optimize` is called with a 1D input signal, this must be 3
 77        (see Notes).
 78    step_size : float, optional
 79        Gradient step size ``mu``. Default is 1e-2.
 80    momentum : float, optional
 81        Momentum factor in ``[0, 1)``. Default is 0.9.
 82    activation : {"tanh", "sigmoid"}, optional
 83        Activation function used in both hidden layers. Default is ``"tanh"``.
 84    w_init : array_like of float, optional
 85        Optional initialization for the output-layer weights ``w_3(0)``, with
 86        shape ``(n_neurons,)``. If None, Xavier/Glorot-style uniform
 87        initialization is used for all weights.
 88    rng : numpy.random.Generator, optional
 89        Random generator used for initialization.
 90
 91    Notes
 92    -----
 93    Real-valued only
 94        This implementation is restricted to real-valued signals and parameters
 95        (``supports_complex=False``). The constraint is enforced via
 96        ``@ensure_real_signals`` on :meth:`optimize`.
 97
 98    Input formats
 99        The method :meth:`optimize` accepts two input formats:
100
101        1. **Regressor matrix** ``U`` with shape ``(N, input_dim)``:
102           each row is used directly as ``u[k]``.
103
104        2. **Scalar input signal** ``x[k]`` with shape ``(N,)``:
105           a 3-dimensional regressor is formed internally as
106
107           .. math::
108               u[k] = [x[k],\\ d[k-1],\\ x[k-1]]^T,
109
110           therefore this mode requires ``input_dim = 3``.
111
112    Parameter update (as implemented)
113        Let the a priori error be ``e[k] = d[k] - y[k]``. This implementation
114        applies a momentum update of the form
115
116        .. math::
117            \\theta[k+1] = \\theta[k] + \\Delta\\theta[k] + \\beta\\,\\Delta\\theta[k-1],
118
119        where ``\\beta`` is the momentum factor and ``\\Delta\\theta[k]`` is a
120        gradient step proportional to ``e[k]``. (See source for the exact
121        per-parameter expressions.)
122
123    Library conventions
124        - The base class ``filter_order`` is used only as a size indicator
125          (set to ``n_neurons - 1``).
126        - ``OptimizationResult.coefficients`` stores a *proxy* coefficient
127          history: the output-layer weight vector ``w3`` as tracked through
128          ``self.w`` for compatibility with the base API.
129        - Full parameter trajectories can be returned in ``result.extra`` when
130          ``return_internal_states=True``.
131
132    References
133    ----------
134    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
135       Implementation*, 5th ed., Algorithm 11.4 (MLP adaptive structure; here
136       extended with momentum and selectable activations).
137    """
138
139    supports_complex: bool = False
140
141    def __init__(
142        self,
143        n_neurons: int = 10,
144        input_dim: int = 3,
145        step_size: float = 0.01,
146        momentum: float = 0.9,
147        activation: str = "tanh",
148        w_init: Optional[ArrayLike] = None,
149        *,
150        rng: Optional[np.random.Generator] = None,
151    ) -> None:
152        n_neurons = int(n_neurons)
153        input_dim = int(input_dim)
154        if n_neurons <= 0:
155            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
156        if input_dim <= 0:
157            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
158        if not (0.0 <= float(momentum) < 1.0):
159            raise ValueError(f"momentum must satisfy 0 <= momentum < 1. Got {momentum}.")
160
161        super().__init__(filter_order=n_neurons - 1, w_init=None)
162
163        self.n_neurons = n_neurons
164        self.input_dim = input_dim
165        self.step_size = float(step_size)
166        self.momentum = float(momentum)
167
168        if activation == "tanh":
169            self.act_func = _tanh
170            self.act_deriv = _dtanh
171        elif activation == "sigmoid":
172            self.act_func = _sigmoid
173            self.act_deriv = _dsigmoid
174        else:
175            raise ValueError("activation must be 'tanh' or 'sigmoid'.")
176
177        self._rng = rng if rng is not None else np.random.default_rng()
178
           # Xavier/Glorot-style uniform limits (fan-based), per the class docstring.
179        limit_w1 = float(np.sqrt(6.0 / (input_dim + n_neurons)))
180        limit_w2 = float(np.sqrt(6.0 / (n_neurons + n_neurons)))
181        limit_w3 = float(np.sqrt(6.0 / (n_neurons + 1)))
182
183        self.w1 = self._rng.uniform(-limit_w1, limit_w1, (n_neurons, input_dim)).astype(np.float64)
184        self.w2 = self._rng.uniform(-limit_w2, limit_w2, (n_neurons, n_neurons)).astype(np.float64)
185        self.w3 = self._rng.uniform(-limit_w3, limit_w3, (n_neurons,)).astype(np.float64)
186
187        if w_init is not None:
188            w3_0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
189            if w3_0.size != n_neurons:
190                raise ValueError(f"w_init must have length {n_neurons}, got {w3_0.size}.")
               # NOTE(review): when w_init is already float64, asarray/reshape may return
               # a view — self.w3 can alias the caller's array (no copy here).
191            self.w3 = w3_0
192
193        self.b1 = np.zeros(n_neurons, dtype=np.float64)
194        self.b2 = np.zeros(n_neurons, dtype=np.float64)
195        self.b3 = 0.0
196
197        self.prev_dw1 = np.zeros_like(self.w1)
198        self.prev_dw2 = np.zeros_like(self.w2)
199        self.prev_dw3 = np.zeros_like(self.w3)
200        self.prev_db1 = np.zeros_like(self.b1)
201        self.prev_db2 = np.zeros_like(self.b2)
202        self.prev_db3 = 0.0
203
           # Mirror w3 into self.w so the base class records a proxy coefficient history.
204        self.w = self.w3.copy()
205        self.w_history = []
206        self._record_history()
207
208    @staticmethod
209    def _as_regressor_matrix(
210        x_in: np.ndarray, d_in: np.ndarray, input_dim: int
211    ) -> Tuple[np.ndarray, bool]:
212        """
213        Return (U, is_multidim).
214
215        - If x_in is 2D: U = x_in
216        - If x_in is 1D: builds U[k]=[x[k], d[k-1], x[k-1]] and requires input_dim=3
217        """
218        x_in = np.asarray(x_in, dtype=np.float64)
219        d_in = np.asarray(d_in, dtype=np.float64).ravel()
220
221        if x_in.ndim == 2:
222            if x_in.shape[0] != d_in.size:
223                raise ValueError(f"Shape mismatch: input({x_in.shape[0]}) and desired({d_in.size}).")
224            if x_in.shape[1] != input_dim:
225                raise ValueError(f"input_signal second dim must be input_dim={input_dim}. Got {x_in.shape}.")
226            return x_in.astype(np.float64, copy=False), True
227
228        if x_in.ndim == 1:
229            if input_dim != 3:
230                raise ValueError(
231                    "When input_signal is 1D, this implementation uses u[k]=[x[k], d[k-1], x[k-1]] "
232                    "so input_dim must be 3."
233                )
234            if x_in.size != d_in.size:
235                raise ValueError(f"Shape mismatch: input({x_in.size}) and desired({d_in.size}).")
236
237            N = int(x_in.size)
238            U = np.zeros((N, 3), dtype=np.float64)
239            x_prev = 0.0
               # Zero initial conditions: the first regressor is u[0] = [x[0], 0, 0].
240            d_prev = 0.0
241            for k in range(N):
242                U[k, :] = np.array([x_in[k], d_prev, x_prev], dtype=np.float64)
243                x_prev = float(x_in[k])
244                d_prev = float(d_in[k])
245            return U, False
246
247        raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")
248
249    @ensure_real_signals
250    def optimize(
251        self,
252        input_signal: Union[np.ndarray, list],
253        desired_signal: Union[np.ndarray, list],
254        verbose: bool = False,
255        return_internal_states: bool = False,
256    ) -> OptimizationResult:
257        """
258        Executes the online MLP adaptation loop (with momentum).
259
260        Parameters
261        ----------
262        input_signal : array_like of float
263            Either:
264            - regressor matrix ``U`` with shape ``(N, input_dim)``, or
265            - scalar input signal ``x[k]`` with shape ``(N,)`` (in which case the
266              regressor is built as ``u[k] = [x[k], d[k-1], x[k-1]]`` and
267              requires ``input_dim = 3``).
268        desired_signal : array_like of float
269            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
270        verbose : bool, optional
271            If True, prints the total runtime after completion.
272        return_internal_states : bool, optional
273            If True, stores parameter snapshots in ``result.extra`` (may be memory
274            intensive for long runs).
275
276        Returns
277        -------
278        OptimizationResult
279            Result object with fields:
280            - outputs : ndarray of float, shape ``(N,)``
281                Scalar output sequence ``y[k]``.
282            - errors : ndarray of float, shape ``(N,)``
283                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
284            - coefficients : ndarray of float
285                Proxy coefficient history recorded by the base class (tracks
286                the output-layer weights ``w3``).
287            - error_type : str
288                Set to ``"a_priori"``.
289            - extra : dict, optional
290                Present only if ``return_internal_states=True`` with:
291                - ``w1_hist`` : list of ndarray
292                    Hidden-layer-1 weight snapshots.
293                - ``w2_hist`` : list of ndarray
294                    Hidden-layer-2 weight snapshots.
295                - ``w3_hist`` : list of ndarray
296                    Output-layer weight snapshots.
297                - ``b1_hist`` : list of ndarray
298                    Bias-1 snapshots.
299                - ``b2_hist`` : list of ndarray
300                    Bias-2 snapshots.
301                - ``b3_hist`` : list of float
302                    Bias-3 snapshots.
303                - ``activation`` : str
304                    Activation identifier (``"tanh"`` or ``"sigmoid"``).
305        """
306        t0 = perf_counter()
307
308        x_in = np.asarray(input_signal, dtype=np.float64)
309        d_in = np.asarray(desired_signal, dtype=np.float64).ravel()
310
311        U, _ = self._as_regressor_matrix(x_in, d_in, self.input_dim)
312        N = int(U.shape[0])
313
314        outputs = np.zeros(N, dtype=np.float64)
315        errors = np.zeros(N, dtype=np.float64)
316
317        w1_hist: List[np.ndarray] = []
318        w2_hist: List[np.ndarray] = []
319        w3_hist: List[np.ndarray] = []
320        b1_hist: List[np.ndarray] = []
321        b2_hist: List[np.ndarray] = []
322        b3_hist: List[float] = []
323
324        for k in range(N):
325            u = U[k, :]
326
327            v1 = (self.w1 @ u) - self.b1
328            y1 = self.act_func(v1)
329
330            v2 = (self.w2 @ y1) - self.b2
331            y2 = self.act_func(v2)
332
333            y_k = float(np.dot(y2, self.w3) - self.b3)
334            outputs[k] = y_k
335            e_k = float(d_in[k] - y_k)
336            errors[k] = e_k
337
               # Backpropagate the a priori error through the output and hidden layers.
338            er_hid2 = e_k * self.w3 * self.act_deriv(v2)
339            er_hid1 = (self.w2.T @ er_hid2) * self.act_deriv(v1)
340
               # Momentum form: theta += delta + beta * delta_prev (uses only the
               # previous single step, not an accumulated velocity).
341            dw3 = (2.0 * self.step_size) * e_k * y2
342            self.w3 = self.w3 + dw3 + self.momentum * self.prev_dw3
343            self.prev_dw3 = dw3
344
345            db3 = (-2.0 * self.step_size) * e_k
346            self.b3 = float(self.b3 + db3 + self.momentum * self.prev_db3)
347            self.prev_db3 = db3
348
349            dw2 = (2.0 * self.step_size) * np.outer(er_hid2, y1)
350            self.w2 = self.w2 + dw2 + self.momentum * self.prev_dw2
351            self.prev_dw2 = dw2
352
353            db2 = (-2.0 * self.step_size) * er_hid2
354            self.b2 = self.b2 + db2 + self.momentum * self.prev_db2
355            self.prev_db2 = db2
356
357            dw1 = (2.0 * self.step_size) * np.outer(er_hid1, u)
358            self.w1 = self.w1 + dw1 + self.momentum * self.prev_dw1
359            self.prev_dw1 = dw1
360
361            db1 = (-2.0 * self.step_size) * er_hid1
362            self.b1 = self.b1 + db1 + self.momentum * self.prev_db1
363            self.prev_db1 = db1
364
365            self.w = self.w3.copy()
366            self._record_history()
367
368            if return_internal_states:
369                w1_hist.append(self.w1.copy())
370                w2_hist.append(self.w2.copy())
371                w3_hist.append(self.w3.copy())
372                b1_hist.append(self.b1.copy())
373                b2_hist.append(self.b2.copy())
374                b3_hist.append(float(self.b3))
375
376        runtime_s = float(perf_counter() - t0)
377        if verbose:
378            print(f"[MultilayerPerceptron] Completed in {runtime_s * 1000:.03f} ms")
379
380        extra: Optional[Dict[str, Any]] = None
381        if return_internal_states:
382            extra = {
383                "w1_hist": w1_hist,
384                "w2_hist": w2_hist,
385                "w3_hist": w3_hist,
386                "b1_hist": b1_hist,
387                "b2_hist": b2_hist,
388                "b3_hist": b3_hist,
389                "activation": "tanh" if self.act_func is _tanh else "sigmoid",
390            }
391
392        return self._pack_results(
393            outputs=outputs,
394            errors=errors,
395            runtime_s=runtime_s,
396            error_type="a_priori",
397            extra=extra,
398        )

Multilayer Perceptron (MLP) adaptive model with momentum (real-valued).

Online adaptation of a 2-hidden-layer feedforward neural network using a stochastic-gradient update with momentum. The model is treated as an adaptive nonlinear filter.

The forward pass is:

$$v_1[k] = W_1 u[k] - b_1, \qquad y_1[k] = \phi(v_1[k]),$$

$$v_2[k] = W_2 y_1[k] - b_2, \qquad y_2[k] = \phi(v_2[k]),$$

$$y[k] = w_3^T y_2[k] - b_3,$$

where $\phi$ is either tanh or sigmoid.

Parameters

n_neurons : int, optional — Number of neurons in each hidden layer. Default is 10.
input_dim : int, optional — Dimension of the regressor vector u[k]. Default is 3. If optimize() is called with a 1D input signal, this must be 3 (see Notes).
step_size : float, optional — Gradient step size mu. Default is 1e-2.
momentum : float, optional — Momentum factor in [0, 1). Default is 0.9.
activation : {"tanh", "sigmoid"}, optional — Activation function used in both hidden layers. Default is "tanh".
w_init : array_like of float, optional — Optional initialization for the output-layer weights w_3(0), with shape (n_neurons,). If None, Xavier/Glorot-style uniform initialization is used for all weights.
rng : numpy.random.Generator, optional — Random generator used for initialization.

Notes

Real-valued only This implementation is restricted to real-valued signals and parameters (supports_complex=False). The constraint is enforced via @ensure_real_signals on optimize().

Input formats The method optimize() accepts two input formats:

1. **Regressor matrix** ``U`` with shape ``(N, input_dim)``:
   each row is used directly as ``u[k]``.

2. **Scalar input signal** ``x[k]`` with shape ``(N,)``:
   a 3-dimensional regressor is formed internally as

   $$u[k] = [x[k],\ d[k-1],\ x[k-1]]^T,$$

   therefore this mode requires ``input_dim = 3``.

Parameter update (as implemented) Let the a priori error be e[k] = d[k] - y[k]. This implementation applies a momentum update of the form

$$\theta[k+1] = \theta[k] + \Delta\theta[k] + \beta\,\Delta\theta[k-1],$$

where $\beta$ is the momentum factor and $\Delta\theta[k]$ is a
gradient step proportional to $e[k]$. (See source for the exact
per-parameter expressions.)

Library conventions - The base class filter_order is used only as a size indicator (set to n_neurons - 1). - OptimizationResult.coefficients stores a proxy coefficient history: the output-layer weight vector w3 as tracked through self.w for compatibility with the base API. - Full parameter trajectories can be returned in result.extra when return_internal_states=True.

References

1. P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 11.4 (MLP adaptive structure; here extended with momentum and selectable activations).
MultilayerPerceptron( n_neurons: int = 10, input_dim: int = 3, step_size: float = 0.01, momentum: float = 0.9, activation: str = 'tanh', w_init: Union[numpy.ndarray, list, NoneType] = None, *, rng: Optional[numpy.random._generator.Generator] = None)
141    def __init__(
142        self,
143        n_neurons: int = 10,
144        input_dim: int = 3,
145        step_size: float = 0.01,
146        momentum: float = 0.9,
147        activation: str = "tanh",
148        w_init: Optional[ArrayLike] = None,
149        *,
150        rng: Optional[np.random.Generator] = None,
151    ) -> None:
152        n_neurons = int(n_neurons)
153        input_dim = int(input_dim)
154        if n_neurons <= 0:
155            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
156        if input_dim <= 0:
157            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
158        if not (0.0 <= float(momentum) < 1.0):
159            raise ValueError(f"momentum must satisfy 0 <= momentum < 1. Got {momentum}.")
160
161        super().__init__(filter_order=n_neurons - 1, w_init=None)
162
163        self.n_neurons = n_neurons
164        self.input_dim = input_dim
165        self.step_size = float(step_size)
166        self.momentum = float(momentum)
167
168        if activation == "tanh":
169            self.act_func = _tanh
170            self.act_deriv = _dtanh
171        elif activation == "sigmoid":
172            self.act_func = _sigmoid
173            self.act_deriv = _dsigmoid
174        else:
175            raise ValueError("activation must be 'tanh' or 'sigmoid'.")
176
177        self._rng = rng if rng is not None else np.random.default_rng()
178
           # Xavier/Glorot-style uniform limits (fan-based), per the class docstring.
179        limit_w1 = float(np.sqrt(6.0 / (input_dim + n_neurons)))
180        limit_w2 = float(np.sqrt(6.0 / (n_neurons + n_neurons)))
181        limit_w3 = float(np.sqrt(6.0 / (n_neurons + 1)))
182
183        self.w1 = self._rng.uniform(-limit_w1, limit_w1, (n_neurons, input_dim)).astype(np.float64)
184        self.w2 = self._rng.uniform(-limit_w2, limit_w2, (n_neurons, n_neurons)).astype(np.float64)
185        self.w3 = self._rng.uniform(-limit_w3, limit_w3, (n_neurons,)).astype(np.float64)
186
187        if w_init is not None:
188            w3_0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
189            if w3_0.size != n_neurons:
190                raise ValueError(f"w_init must have length {n_neurons}, got {w3_0.size}.")
               # NOTE(review): when w_init is already float64, asarray/reshape may return
               # a view — self.w3 can alias the caller's array (no copy here).
191            self.w3 = w3_0
192
193        self.b1 = np.zeros(n_neurons, dtype=np.float64)
194        self.b2 = np.zeros(n_neurons, dtype=np.float64)
195        self.b3 = 0.0
196
197        self.prev_dw1 = np.zeros_like(self.w1)
198        self.prev_dw2 = np.zeros_like(self.w2)
199        self.prev_dw3 = np.zeros_like(self.w3)
200        self.prev_db1 = np.zeros_like(self.b1)
201        self.prev_db2 = np.zeros_like(self.b2)
202        self.prev_db3 = 0.0
203
           # Mirror w3 into self.w so the base class records a proxy coefficient history.
204        self.w = self.w3.copy()
205        self.w_history = []
206        self._record_history()
supports_complex: bool = False
n_neurons
input_dim
step_size
momentum
w1
w2
w3
b1
b2
b3
prev_dw1
prev_dw2
prev_dw3
prev_db1
prev_db2
prev_db3
w
w_history
@ensure_real_signals
def optimize( self, input_signal: Union[numpy.ndarray, list], desired_signal: Union[numpy.ndarray, list], verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
249    @ensure_real_signals
250    def optimize(
251        self,
252        input_signal: Union[np.ndarray, list],
253        desired_signal: Union[np.ndarray, list],
254        verbose: bool = False,
255        return_internal_states: bool = False,
256    ) -> OptimizationResult:
257        """
258        Executes the online MLP adaptation loop (with momentum).
259
260        Parameters
261        ----------
262        input_signal : array_like of float
263            Either:
264            - regressor matrix ``U`` with shape ``(N, input_dim)``, or
265            - scalar input signal ``x[k]`` with shape ``(N,)`` (in which case the
266              regressor is built as ``u[k] = [x[k], d[k-1], x[k-1]]`` and
267              requires ``input_dim = 3``).
268        desired_signal : array_like of float
269            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
270        verbose : bool, optional
271            If True, prints the total runtime after completion.
272        return_internal_states : bool, optional
273            If True, stores parameter snapshots in ``result.extra`` (may be memory
274            intensive for long runs).
275
276        Returns
277        -------
278        OptimizationResult
279            Result object with fields:
280            - outputs : ndarray of float, shape ``(N,)``
281                Scalar output sequence ``y[k]``.
282            - errors : ndarray of float, shape ``(N,)``
283                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
284            - coefficients : ndarray of float
285                Proxy coefficient history recorded by the base class (tracks
286                the output-layer weights ``w3``).
287            - error_type : str
288                Set to ``"a_priori"``.
289            - extra : dict, optional
290                Present only if ``return_internal_states=True`` with:
291                - ``w1_hist`` : list of ndarray
292                    Hidden-layer-1 weight snapshots.
293                - ``w2_hist`` : list of ndarray
294                    Hidden-layer-2 weight snapshots.
295                - ``w3_hist`` : list of ndarray
296                    Output-layer weight snapshots.
297                - ``b1_hist`` : list of ndarray
298                    Bias-1 snapshots.
299                - ``b2_hist`` : list of ndarray
300                    Bias-2 snapshots.
301                - ``b3_hist`` : list of float
302                    Bias-3 snapshots.
303                - ``activation`` : str
304                    Activation identifier (``"tanh"`` or ``"sigmoid"``).
305        """
306        t0 = perf_counter()
307
308        x_in = np.asarray(input_signal, dtype=np.float64)
309        d_in = np.asarray(desired_signal, dtype=np.float64).ravel()
310
        # NOTE(review): d_in is forwarded to _as_regressor_matrix, which presumably
        # validates/aligns its length with the input (no explicit size check here,
        # unlike RBF.optimize) — confirm against the helper's implementation.
311        U, _ = self._as_regressor_matrix(x_in, d_in, self.input_dim)
312        N = int(U.shape[0])
313
314        outputs = np.zeros(N, dtype=np.float64)
315        errors = np.zeros(N, dtype=np.float64)
316
317        w1_hist: List[np.ndarray] = []
318        w2_hist: List[np.ndarray] = []
319        w3_hist: List[np.ndarray] = []
320        b1_hist: List[np.ndarray] = []
321        b2_hist: List[np.ndarray] = []
322        b3_hist: List[float] = []
323
324        for k in range(N):
325            u = U[k, :]
326
            # Forward pass: two hidden layers (biases are SUBTRACTED, matching the
            # textbook convention) followed by a linear output neuron.
327            v1 = (self.w1 @ u) - self.b1
328            y1 = self.act_func(v1)
329
330            v2 = (self.w2 @ y1) - self.b2
331            y2 = self.act_func(v2)
332
333            y_k = float(np.dot(y2, self.w3) - self.b3)
334            outputs[k] = y_k
335            e_k = float(d_in[k] - y_k)
336            errors[k] = e_k
337
            # Backpropagated local gradients. Both are computed BEFORE any weight is
            # touched, so they use the pre-update w3 and w2 — the update order below
            # must not be reordered ahead of these two lines.
338            er_hid2 = e_k * self.w3 * self.act_deriv(v2)
339            er_hid1 = (self.w2.T @ er_hid2) * self.act_deriv(v1)
340
            # Each parameter takes an LMS-style step plus a momentum term (the
            # previous raw delta, WITHOUT its own momentum contribution).
341            dw3 = (2.0 * self.step_size) * e_k * y2
342            self.w3 = self.w3 + dw3 + self.momentum * self.prev_dw3
343            self.prev_dw3 = dw3
344
            # Bias deltas carry the opposite sign because biases are subtracted
            # in the forward pass.
345            db3 = (-2.0 * self.step_size) * e_k
346            self.b3 = float(self.b3 + db3 + self.momentum * self.prev_db3)
347            self.prev_db3 = db3
348
349            dw2 = (2.0 * self.step_size) * np.outer(er_hid2, y1)
350            self.w2 = self.w2 + dw2 + self.momentum * self.prev_dw2
351            self.prev_dw2 = dw2
352
353            db2 = (-2.0 * self.step_size) * er_hid2
354            self.b2 = self.b2 + db2 + self.momentum * self.prev_db2
355            self.prev_db2 = db2
356
357            dw1 = (2.0 * self.step_size) * np.outer(er_hid1, u)
358            self.w1 = self.w1 + dw1 + self.momentum * self.prev_dw1
359            self.prev_dw1 = dw1
360
361            db1 = (-2.0 * self.step_size) * er_hid1
362            self.b1 = self.b1 + db1 + self.momentum * self.prev_db1
363            self.prev_db1 = db1
364
            # Mirror the output-layer weights into self.w so the base class records
            # a (proxy) coefficient history for this nonlinear model.
365            self.w = self.w3.copy()
366            self._record_history()
367
368            if return_internal_states:
369                w1_hist.append(self.w1.copy())
370                w2_hist.append(self.w2.copy())
371                w3_hist.append(self.w3.copy())
372                b1_hist.append(self.b1.copy())
373                b2_hist.append(self.b2.copy())
374                b3_hist.append(float(self.b3))
375
376        runtime_s = float(perf_counter() - t0)
377        if verbose:
378            print(f"[MultilayerPerceptron] Completed in {runtime_s * 1000:.03f} ms")
379
380        extra: Optional[Dict[str, Any]] = None
381        if return_internal_states:
382            extra = {
383                "w1_hist": w1_hist,
384                "w2_hist": w2_hist,
385                "w3_hist": w3_hist,
386                "b1_hist": b1_hist,
387                "b2_hist": b2_hist,
388                "b3_hist": b3_hist,
                # Identify the activation by object identity with the module-level
                # _tanh helper (the only two options accepted by __init__).
389                "activation": "tanh" if self.act_func is _tanh else "sigmoid",
390            }
391
392        return self._pack_results(
393            outputs=outputs,
394            errors=errors,
395            runtime_s=runtime_s,
396            error_type="a_priori",
397            extra=extra,
398        )

Executes the online MLP adaptation loop (with momentum).

Parameters

input_signal : array_like of float Either: - regressor matrix U with shape (N, input_dim), or - scalar input signal x[k] with shape (N,) (in which case the regressor is built as u[k] = [x[k], d[k-1], x[k-1]] and requires input_dim = 3). desired_signal : array_like of float Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, stores parameter snapshots in result.extra (may be memory intensive for long runs).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of float, shape (N,) Scalar output sequence y[k]. - errors : ndarray of float, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of float Proxy coefficient history recorded by the base class (tracks the output-layer weights w3). - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True with: - w1_hist : list of ndarray Hidden-layer-1 weight snapshots. - w2_hist : list of ndarray Hidden-layer-2 weight snapshots. - w3_hist : list of ndarray Output-layer weight snapshots. - b1_hist : list of ndarray Bias-1 snapshots. - b2_hist : list of ndarray Bias-2 snapshots. - b3_hist : list of float Bias-3 snapshots. - activation : str Activation identifier ("tanh" or "sigmoid").

class RBF(AdaptiveFilter):
    """
    Radial Basis Function (RBF) adaptive model (real-valued).

    Online adaptation of an RBF network with Gaussian basis functions, following
    Diniz (Alg. 11.5). The algorithm updates:
    - output weights ``w`` (one weight per neuron),
    - centers ``c_i`` (stored in ``vet``),
    - spreads ``sigma_i`` (stored in ``sigma``).

    Parameters
    ----------
    n_neurons : int
        Number of RBF neurons (basis functions).
    input_dim : int
        Dimension of the regressor vector ``u[k]``. If :meth:`optimize` is called
        with a 1D input signal, this is interpreted as the tap length.
    ur : float, optional
        Step size for center updates. Default is 1e-2.
    uw : float, optional
        Step size for output-weight updates. Default is 1e-2.
    us : float, optional
        Step size for spread (sigma) updates. Default is 1e-2.
    w_init : array_like of float, optional
        Initial output-weight vector ``w(0)`` with shape ``(n_neurons,)``.
        If None, initializes from a standard normal distribution.
    sigma_init : float, optional
        Initial spread value used for all neurons (must be positive). Default is 1.0.
    centers_init_scale : float, optional
        Scale used for random initialization of centers. Default is 0.5.
    rng : numpy.random.Generator, optional
        Random generator used for reproducible initialization.
    safe_eps : float, optional
        Small positive constant used to guard denominators (e.g., ``sigma^2`` and
        ``sigma^3``). Default is 1e-12.

    Notes
    -----
    Real-valued only
        This implementation is restricted to real-valued signals and parameters
        (``supports_complex=False``). The constraint is enforced via
        ``@ensure_real_signals`` on :meth:`optimize`.

    Model
        For a regressor vector ``u[k] \\in \\mathbb{R}^{D}``, define Gaussian basis
        functions:

        .. math::
            \\phi_i(u[k]) = \\exp\\left(-\\frac{\\|u[k] - c_i\\|^2}{\\sigma_i^2}\\right),

        where ``c_i`` is the center and ``sigma_i > 0`` is the spread of neuron ``i``.
        The network output is

        .. math::
            y[k] = \\sum_{i=1}^{Q} w_i\\, \\phi_i(u[k]) = w^T \\phi(u[k]),

        where ``Q = n_neurons`` and ``\\phi(u[k]) \\in \\mathbb{R}^{Q}`` stacks all
        activations.

    Input formats
        The method :meth:`optimize` accepts two input formats:

        1. **Regressor matrix** ``U`` with shape ``(N, input_dim)``:
           each row is used directly as ``u[k]``.

        2. **Scalar input signal** ``x[k]`` with shape ``(N,)``:
           tapped-delay regressors of length ``input_dim`` are built as

           .. math::
               u[k] = [x[k], x[k-1], \\ldots, x[k-input\\_dim+1]]^T.

    Library conventions
        - ``OptimizationResult.coefficients`` stores the history of the **output
          weights** ``w`` (the neuron output layer).
        - Centers and spreads are returned via ``result.extra`` when
          ``return_internal_states=True``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 11.5.
    """
    supports_complex: bool = False

    def __init__(
        self,
        n_neurons: int,
        input_dim: int,
        ur: float = 0.01,
        uw: float = 0.01,
        us: float = 0.01,
        w_init: Optional[ArrayLike] = None,
        *,
        sigma_init: float = 1.0,
        centers_init_scale: float = 0.5,
        rng: Optional[np.random.Generator] = None,
        safe_eps: float = 1e-12,
    ) -> None:
        n_neurons = int(n_neurons)
        input_dim = int(input_dim)
        if n_neurons <= 0:
            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
        if input_dim <= 0:
            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
        if float(sigma_init) <= 0.0:
            raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")

        # filter_order is a proxy: the base class sizes its coefficient
        # bookkeeping to the n_neurons output weights, not to a FIR order.
        super().__init__(filter_order=n_neurons - 1, w_init=None)

        self.n_neurons = n_neurons
        self.input_dim = input_dim
        self.ur = float(ur)
        self.uw = float(uw)
        self.us = float(us)

        self._safe_eps = float(safe_eps)
        self._rng = rng if rng is not None else np.random.default_rng()

        if w_init is None:
            self.w = self._rng.standard_normal(n_neurons).astype(np.float64)
        else:
            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if w0.size != n_neurons:
                raise ValueError(f"w_init must have length {n_neurons}, got {w0.size}.")
            self.w = w0

        # Centers start as scaled Gaussian noise; spreads start uniform at sigma_init.
        self.vet = (float(centers_init_scale) * self._rng.standard_normal((n_neurons, input_dim))).astype(
            np.float64
        )
        self.sigma = np.ones(n_neurons, dtype=np.float64) * float(sigma_init)

        self.w_history = []
        self._record_history()

    @staticmethod
    def _build_regressors_1d(x: np.ndarray, input_dim: int) -> np.ndarray:
        """Build tapped-delay regressors u[k]=[x[k], x[k-1], ..., x[k-input_dim+1]]."""
        x = np.asarray(x, dtype=np.float64).ravel()
        N = int(x.size)
        m = int(input_dim) - 1
        # Zero-pad on the left so the first regressors (k < m) see zeros for
        # samples "before" the start of the signal.
        x_pad = np.zeros(N + m, dtype=np.float64)
        x_pad[m:] = x
        return np.array([x_pad[k : k + m + 1][::-1] for k in range(N)], dtype=np.float64)

    @staticmethod
    def _as_regressor_matrix(x_in: np.ndarray, input_dim: int) -> Tuple[np.ndarray, int]:
        """Return (U, N) from either (N,input_dim) or (N,) input."""
        x_in = np.asarray(x_in, dtype=np.float64)
        if x_in.ndim == 2:
            if x_in.shape[1] != input_dim:
                raise ValueError(f"input_signal must have shape (N,{input_dim}). Got {x_in.shape}.")
            return x_in.astype(np.float64, copy=False), int(x_in.shape[0])
        if x_in.ndim == 1:
            U = RBF._build_regressors_1d(x_in, input_dim=input_dim)
            return U, int(U.shape[0])
        raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")

    @ensure_real_signals
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the RBF online adaptation loop.

        Parameters
        ----------
        input_signal : array_like of float
            Either:
            - regressor matrix ``U`` with shape ``(N, input_dim)``, or
            - scalar input signal ``x[k]`` with shape ``(N,)`` (tapped-delay
              regressors are built internally).
        desired_signal : array_like of float
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes final centers/spreads and last activation vector
            in ``result.extra``.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of float, shape ``(N,)``
                Scalar output sequence ``y[k] = w^T \\phi(u[k])``.
            - errors : ndarray of float, shape ``(N,)``
                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of float
                Output-weight history recorded by the base class.
            - error_type : str
                Set to ``"a_priori"``.
            - extra : dict, optional
                Present only if ``return_internal_states=True`` with:
                - ``centers_last`` : ndarray of float
                    Final centers array (shape ``(n_neurons, input_dim)``).
                - ``sigma_last`` : ndarray of float
                    Final spreads vector (shape ``(n_neurons,)``).
                - ``last_phi`` : ndarray of float
                    Last basis-function activation vector ``\\phi(u[k])`` (shape ``(n_neurons,)``).
        """
        t0 = perf_counter()

        x_in = np.asarray(input_signal, dtype=np.float64)
        d_in = np.asarray(desired_signal, dtype=np.float64).ravel()

        U, N = self._as_regressor_matrix(x_in, input_dim=self.input_dim)
        if d_in.size != N:
            raise ValueError(f"Shape mismatch: input({N}) and desired({d_in.size}).")

        outputs = np.zeros(N, dtype=np.float64)
        errors = np.zeros(N, dtype=np.float64)

        last_phi: Optional[np.ndarray] = None

        for k in range(N):
            u = U[k, :]

            # Squared distances ||u - c_i||^2 to every center, shape (Q,).
            diff = u[None, :] - self.vet
            dis_sq = np.sum(diff * diff, axis=1)

            # Gaussian activations; safe_eps guards against a zero spread.
            sigma_sq = (self.sigma * self.sigma) + self._safe_eps
            phi = np.exp(-dis_sq / sigma_sq)
            last_phi = phi

            y_k = float(np.dot(self.w, phi))
            outputs[k] = y_k

            e_k = float(d_in[k] - y_k)
            errors[k] = e_k

            # Output-weight update (LMS-style, driven by the a priori error).
            self.w = self.w + (2.0 * self.uw) * e_k * phi

            # Clamp sigma away from zero before it enters the denominators.
            sigma_cu = np.maximum(self.sigma, self._safe_eps)
            # Spread update; note it deliberately uses the already-updated self.w,
            # matching the original update order.
            self.sigma = self.sigma + (2.0 * self.us) * e_k * phi * self.w * dis_sq / (sigma_cu**3)

            # Center update, vectorized over neurons (was a per-neuron Python loop).
            # Equivalent: the per-neuron updates are independent, and diff still
            # equals u - vet because vet has not changed since diff was computed.
            denom_c = (sigma_cu**2) + self._safe_eps
            gain = (2.0 * self.ur) * e_k * phi * self.w / denom_c
            self.vet = self.vet + gain[:, None] * diff

            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[RBF] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "centers_last": self.vet.copy(),
                "sigma_last": self.sigma.copy(),
                "last_phi": None if last_phi is None else last_phi.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )

Radial Basis Function (RBF) adaptive model (real-valued).

Online adaptation of an RBF network with Gaussian basis functions, following Diniz (Alg. 11.5). The algorithm updates:

  • output weights w (one weight per neuron),
  • centers c_i (stored in vet),
  • spreads sigma_i (stored in sigma).

Parameters

n_neurons : int Number of RBF neurons (basis functions). input_dim : int Dimension of the regressor vector u[k]. If optimize() is called with a 1D input signal, this is interpreted as the tap length. ur : float, optional Step size for center updates. Default is 1e-2. uw : float, optional Step size for output-weight updates. Default is 1e-2. us : float, optional Step size for spread (sigma) updates. Default is 1e-2. w_init : array_like of float, optional Initial output-weight vector w(0) with shape (n_neurons,). If None, initializes from a standard normal distribution. sigma_init : float, optional Initial spread value used for all neurons (must be positive). Default is 1.0. centers_init_scale : float, optional Scale used for random initialization of centers. Default is 0.5. rng : numpy.random.Generator, optional Random generator used for reproducible initialization. safe_eps : float, optional Small positive constant used to guard denominators (e.g., sigma^2 and sigma^3). Default is 1e-12.

Notes

Real-valued only. This implementation is restricted to real-valued signals and parameters (supports_complex=False). The constraint is enforced via @ensure_real_signals on optimize().

Model. For a regressor vector $u[k] \in \mathbb{R}^{D}$, define Gaussian basis functions:

$$\phi_i(u[k]) = \exp\left(-\frac{\|u[k] - c_i\|^2}{\sigma_i^2}\right),$$

where ``c_i`` is the center and ``sigma_i > 0`` is the spread of neuron ``i``.
The network output is

$$y[k] = \sum_{i=1}^{Q} w_i\, \phi_i(u[k]) = w^T \phi(u[k]),$$

where ``Q = n_neurons`` and ``\phi(u[k]) \in \mathbb{R}^{Q}`` stacks all
activations.

Input formats The method optimize() accepts two input formats:

1. **Regressor matrix** ``U`` with shape ``(N, input_dim)``:
   each row is used directly as ``u[k]``.

2. **Scalar input signal** ``x[k]`` with shape ``(N,)``:
   tapped-delay regressors of length ``input_dim`` are built as

   $$u[k] = [x[k], x[k-1], \ldots, x[k-input\_dim+1]]^T.$$

Library conventions - OptimizationResult.coefficients stores the history of the output weights w (the neuron output layer). - Centers and spreads are returned via result.extra when return_internal_states=True.

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 11.5.
RBF( n_neurons: int, input_dim: int, ur: float = 0.01, uw: float = 0.01, us: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None, *, sigma_init: float = 1.0, centers_init_scale: float = 0.5, rng: Optional[numpy.random._generator.Generator] = None, safe_eps: float = 1e-12)
113    def __init__(
114        self,
115        n_neurons: int,
116        input_dim: int,
117        ur: float = 0.01,
118        uw: float = 0.01,
119        us: float = 0.01,
120        w_init: Optional[ArrayLike] = None,
121        *,
122        sigma_init: float = 1.0,
123        centers_init_scale: float = 0.5,
124        rng: Optional[np.random.Generator] = None,
125        safe_eps: float = 1e-12,
126    ) -> None:
127        n_neurons = int(n_neurons)
128        input_dim = int(input_dim)
129        if n_neurons <= 0:
130            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
131        if input_dim <= 0:
132            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
133        if float(sigma_init) <= 0.0:
134            raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")
135
        # filter_order is a proxy: the base class sizes its coefficient
        # bookkeeping to the n_neurons output weights, not to a FIR order.
136        super().__init__(filter_order=n_neurons - 1, w_init=None)
137
138        self.n_neurons = n_neurons
139        self.input_dim = input_dim
140        self.ur = float(ur)
141        self.uw = float(uw)
142        self.us = float(us)
143
144        self._safe_eps = float(safe_eps)
145        self._rng = rng if rng is not None else np.random.default_rng()
146
147        if w_init is None:
148            self.w = self._rng.standard_normal(n_neurons).astype(np.float64)
149        else:
150            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
151            if w0.size != n_neurons:
152                raise ValueError(f"w_init must have length {n_neurons}, got {w0.size}.")
153            self.w = w0
154
        # Centers (vet) start as scaled Gaussian noise; spreads start uniform
        # at sigma_init.
155        self.vet = (float(centers_init_scale) * self._rng.standard_normal((n_neurons, input_dim))).astype(
156            np.float64
157        )
158        self.sigma = np.ones(n_neurons, dtype=np.float64) * float(sigma_init)
159
160        self.w_history = []
161        self._record_history()
supports_complex: bool = False
n_neurons
input_dim
ur
uw
us
vet
sigma
w_history
@ensure_real_signals
def optimize( self, input_signal: Union[numpy.ndarray, list], desired_signal: Union[numpy.ndarray, list], verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
186    @ensure_real_signals
187    def optimize(
188        self,
189        input_signal: Union[np.ndarray, list],
190        desired_signal: Union[np.ndarray, list],
191        verbose: bool = False,
192        return_internal_states: bool = False,
193    ) -> OptimizationResult:
194        """
195        Executes the RBF online adaptation loop.
196
197        Parameters
198        ----------
199        input_signal : array_like of float
200            Either:
201            - regressor matrix ``U`` with shape ``(N, input_dim)``, or
202            - scalar input signal ``x[k]`` with shape ``(N,)`` (tapped-delay
203              regressors are built internally).
204        desired_signal : array_like of float
205            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
206        verbose : bool, optional
207            If True, prints the total runtime after completion.
208        return_internal_states : bool, optional
209            If True, includes final centers/spreads and last activation vector
210            in ``result.extra``.
211
212        Returns
213        -------
214        OptimizationResult
215            Result object with fields:
216            - outputs : ndarray of float, shape ``(N,)``
217                Scalar output sequence ``y[k] = w^T \\phi(u[k])``.
218            - errors : ndarray of float, shape ``(N,)``
219                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
220            - coefficients : ndarray of float
221                Output-weight history recorded by the base class.
222            - error_type : str
223                Set to ``"a_priori"``.
224            - extra : dict, optional
225                Present only if ``return_internal_states=True`` with:
226                - ``centers_last`` : ndarray of float
227                    Final centers array (shape ``(n_neurons, input_dim)``).
228                - ``sigma_last`` : ndarray of float
229                    Final spreads vector (shape ``(n_neurons,)``).
230                - ``last_phi`` : ndarray of float
231                    Last basis-function activation vector ``\\phi(u[k])`` (shape ``(n_neurons,)``).
232        """
233        t0 = perf_counter()
234
235        x_in = np.asarray(input_signal, dtype=np.float64)
236        d_in = np.asarray(desired_signal, dtype=np.float64).ravel()
237
238        U, N = self._as_regressor_matrix(x_in, input_dim=self.input_dim)
239        if d_in.size != N:
240            raise ValueError(f"Shape mismatch: input({N}) and desired({d_in.size}).")
241
242        outputs = np.zeros(N, dtype=np.float64)
243        errors = np.zeros(N, dtype=np.float64)
244
245        last_phi: Optional[np.ndarray] = None
246
247        for k in range(N):
248            u = U[k, :]
249
            # Squared distances ||u - c_i||^2 to every center, shape (Q,).
250            diff = u[None, :] - self.vet
251            dis_sq = np.sum(diff * diff, axis=1)
252
            # Gaussian activations; safe_eps guards against a zero spread.
253            sigma_sq = (self.sigma * self.sigma) + self._safe_eps
254            phi = np.exp(-dis_sq / sigma_sq)
255            last_phi = phi
256
257            y_k = float(np.dot(self.w, phi))
258            outputs[k] = y_k
259
260            e_k = float(d_in[k] - y_k)
261            errors[k] = e_k
262
            # Output-weight update (LMS-style, driven by the a priori error e_k).
263            self.w = self.w + (2.0 * self.uw) * e_k * phi
264
            # Clamp sigma away from zero before using it in denominators; note the
            # spread update uses the already-updated self.w.
265            sigma_cu = np.maximum(self.sigma, self._safe_eps)
266            self.sigma = self.sigma + (2.0 * self.us) * e_k * phi * self.w * dis_sq / (sigma_cu**3)
267
            # Per-neuron center update: each center is pulled toward u in
            # proportion to its activation phi[p], the error, and its weight.
268            denom_c = (sigma_cu**2) + self._safe_eps
269            for p in range(self.n_neurons):
270                self.vet[p] = self.vet[p] + (2.0 * self.ur) * phi[p] * e_k * self.w[p] * (u - self.vet[p]) / denom_c[p]
271
272            self._record_history()
273
274        runtime_s = float(perf_counter() - t0)
275        if verbose:
276            print(f"[RBF] Completed in {runtime_s * 1000:.03f} ms")
277
278        extra: Optional[Dict[str, Any]] = None
279        if return_internal_states:
280            extra = {
281                "centers_last": self.vet.copy(),
282                "sigma_last": self.sigma.copy(),
283                "last_phi": None if last_phi is None else last_phi.copy(),
284            }
285
286        return self._pack_results(
287            outputs=outputs,
288            errors=errors,
289            runtime_s=runtime_s,
290            error_type="a_priori",
291            extra=extra,
292        )

Executes the RBF online adaptation loop.

Parameters

input_signal : array_like of float Either: - regressor matrix U with shape (N, input_dim), or - scalar input signal x[k] with shape (N,) (tapped-delay regressors are built internally). desired_signal : array_like of float Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes final centers/spreads and last activation vector in result.extra.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of float, shape (N,) Scalar output sequence y[k] = w^T \phi(u[k]). - errors : ndarray of float, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of float Output-weight history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True with: - centers_last : ndarray of float Final centers array (shape (n_neurons, input_dim)). - sigma_last : ndarray of float Final spreads vector (shape (n_neurons,)). - last_phi : ndarray of float Last basis-function activation vector \phi(u[k]) (shape (n_neurons,)).

class VolterraLMS(pydaptivefiltering.AdaptiveFilter):
 29class VolterraLMS(AdaptiveFilter):
 30    """
 31    Second-order Volterra LMS adaptive filter (real-valued).
 32
 33    Volterra LMS (Diniz, Alg. 11.1) using a second-order Volterra expansion.
 34    The adaptive model augments a linear tapped-delay regressor with all
 35    quadratic products (including squares) and performs an LMS-type update on
 36    the expanded coefficient vector.
 37
 38    Parameters
 39    ----------
 40    memory : int, optional
 41        Linear memory length ``L``. The linear delay line is
 42        ``[x[k], x[k-1], ..., x[k-L+1]]``. Default is 3.
 43    step : float or array_like of float, optional
 44        Step size ``mu``. Can be either:
 45        - a scalar (same step for all coefficients), or
 46        - a vector with shape ``(n_coeffs,)`` for per-term step scaling.
 47        Default is 1e-2.
 48    w_init : array_like of float, optional
 49        Initial coefficient vector ``w(0)`` with shape ``(n_coeffs,)``. If None,
 50        initializes with zeros.
 51    safe_eps : float, optional
 52        Small positive constant kept for API consistency across the library.
 53        (Not used directly by this implementation.) Default is 1e-12.
 54
 55    Notes
 56    -----
 57    Real-valued only
 58        This implementation is restricted to real-valued signals and coefficients
 59        (``supports_complex=False``). The constraint is enforced via
 60        ``@ensure_real_signals`` on :meth:`optimize`.
 61
 62    Volterra regressor (as implemented)
 63        Let the linear delay line be
 64
 65        .. math::
 66            x_{lin}[k] = [x[k], x[k-1], \\ldots, x[k-L+1]]^T \\in \\mathbb{R}^{L}.
 67
 68        The second-order Volterra regressor is constructed as
 69
 70        .. math::
 71            u[k] =
 72            \\begin{bmatrix}
 73                x_{lin}[k] \\\\
 74                \\mathrm{vec}\\bigl(x_{lin}[k] x_{lin}^T[k]\\bigr)_{i \\le j}
 75            \\end{bmatrix}
 76            \\in \\mathbb{R}^{n_{coeffs}},
 77
 78        where the quadratic block contains all products ``x_{lin,i}[k] x_{lin,j}[k]``
 79        for ``0 \\le i \\le j \\le L-1`` (unique terms only).
 80
 81        The number of coefficients is therefore
 82
 83        .. math::
 84            n_{coeffs} = L + \\frac{L(L+1)}{2}.
 85
 86    LMS recursion (a priori)
 87        With
 88
 89        .. math::
 90            y[k] = w^T[k] u[k], \\qquad e[k] = d[k] - y[k],
 91
 92        the update implemented here is
 93
 94        .. math::
 95            w[k+1] = w[k] + 2\\mu\\, e[k] \\, u[k],
 96
 97        where ``\\mu`` may be scalar or element-wise (vector step).
 98
 99    Implementation details
100        - The coefficient vector ``self.w`` stores the full Volterra parameter
101          vector (linear + quadratic) and is recorded by the base class.
102        - The quadratic term ordering matches the nested loops used in
103          :meth:`_create_volterra_regressor` (i increasing, j from i to L-1).
104
105    References
106    ----------
107    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
108       Implementation*, 5th ed., Algorithm 11.1.
109    """
110
111    supports_complex: bool = False
112
113    def __init__(
114        self,
115        memory: int = 3,
116        step: Union[float, np.ndarray, list] = 1e-2,
117        w_init: Optional[ArrayLike] = None,
118        *,
119        safe_eps: float = 1e-12,
120    ) -> None:
121        memory = int(memory)
122        if memory <= 0:
123            raise ValueError(f"memory must be > 0. Got {memory}.")
124
125        self.memory: int = memory
126        self.n_coeffs: int = memory + (memory * (memory + 1)) // 2
127        self._safe_eps: float = float(safe_eps)
128
129        super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)
130
131        if isinstance(step, (list, np.ndarray)):
132            step_vec = np.asarray(step, dtype=np.float64).reshape(-1)
133            if step_vec.size != self.n_coeffs:
134                raise ValueError(
135                    f"step vector must have length {self.n_coeffs}, got {step_vec.size}."
136                )
137            self.step: Union[float, np.ndarray] = step_vec
138        else:
139            self.step = float(step)
140
141        self.w = np.asarray(self.w, dtype=np.float64)
142
143        self.w_history = []
144        self._record_history()
145
146    def _create_volterra_regressor(self, x_lin: np.ndarray) -> np.ndarray:
147        """
148        Constructs the second-order Volterra regressor from a linear delay line.
149
150        Parameters
151        ----------
152        x_lin : ndarray of float
153            Linear delay line with shape ``(L,)`` ordered as
154            ``[x[k], x[k-1], ..., x[k-L+1]]``.
155
156        Returns
157        -------
158        ndarray of float
159            Volterra regressor ``u[k]`` with shape ``(n_coeffs,)`` containing:
160            - linear terms, followed by
161            - quadratic terms for ``i <= j``.
162        """
163        x_lin = np.asarray(x_lin, dtype=np.float64).reshape(-1)
164        if x_lin.size != self.memory:
165            raise ValueError(
166                f"x_lin must have length {self.memory}, got {x_lin.size}."
167            )
168
169        quad = np.empty((self.memory * (self.memory + 1)) // 2, dtype=np.float64)
170        idx = 0
171        for i in range(self.memory):
172            for j in range(i, self.memory):
173                quad[idx] = x_lin[i] * x_lin[j]
174                idx += 1
175
176        return np.concatenate([x_lin, quad], axis=0)
177
178    @ensure_real_signals
179    def optimize(
180        self,
181        input_signal: Union[np.ndarray, list],
182        desired_signal: Union[np.ndarray, list],
183        verbose: bool = False,
184        return_internal_states: bool = False,
185    ) -> OptimizationResult:
186        """
187        Executes the Volterra LMS adaptation loop over paired input/desired sequences.
188
189        Parameters
190        ----------
191        input_signal : array_like of float
192            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
193        desired_signal : array_like of float
194            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
195        verbose : bool, optional
196            If True, prints the total runtime after completion.
197        return_internal_states : bool, optional
198            If True, includes the last internal states in ``result.extra``:
199            ``"last_regressor"``, ``"memory"``, and ``"n_coeffs"``.
200
201        Returns
202        -------
203        OptimizationResult
204            Result object with fields:
205            - outputs : ndarray of float, shape ``(N,)``
206                Scalar a priori output sequence, ``y[k] = w^T[k] u[k]``.
207            - errors : ndarray of float, shape ``(N,)``
208                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
209            - coefficients : ndarray of float
210                Volterra coefficient history recorded by the base class.
211            - error_type : str
212                Set to ``"a_priori"``.
213            - extra : dict, optional
214                Present only if ``return_internal_states=True``.
215        """
216        t0 = perf_counter()
217
218        x = np.asarray(input_signal, dtype=np.float64).ravel()
219        d = np.asarray(desired_signal, dtype=np.float64).ravel()
220
221        if x.size != d.size:
222            raise ValueError(f"Inconsistent lengths: input({x.size}) != desired({d.size})")
223        n_samples = int(x.size)
224
225        outputs = np.zeros(n_samples, dtype=np.float64)
226        errors = np.zeros(n_samples, dtype=np.float64)
227
228        L = int(self.memory)
229        x_padded = np.zeros(n_samples + (L - 1), dtype=np.float64)
230        x_padded[L - 1 :] = x
231
232        last_u: Optional[np.ndarray] = None
233
234        for k in range(n_samples):
235            x_lin = x_padded[k : k + L][::-1]
236            u = self._create_volterra_regressor(x_lin)
237            last_u = u
238
239            y_k = float(np.dot(self.w, u))
240            outputs[k] = y_k
241
242            e_k = float(d[k] - y_k)
243            errors[k] = e_k
244
245            if isinstance(self.step, np.ndarray):
246                self.w = self.w + (2.0 * self.step) * e_k * u
247            else:
248                self.w = self.w + (2.0 * float(self.step)) * e_k * u
249
250            self._record_history()
251
252        runtime_s = float(perf_counter() - t0)
253        if verbose:
254            print(f"[VolterraLMS] Completed in {runtime_s * 1000:.03f} ms")
255
256        extra: Optional[Dict[str, Any]] = None
257        if return_internal_states:
258            extra = {
259                "last_regressor": None if last_u is None else last_u.copy(),
260                "memory": int(self.memory),
261                "n_coeffs": int(self.n_coeffs),
262            }
263
264        return self._pack_results(
265            outputs=outputs,
266            errors= errors,
267            runtime_s=runtime_s,
268            error_type="a_priori",
269            extra=extra,
270        )

Second-order Volterra LMS adaptive filter (real-valued).

Volterra LMS (Diniz, Alg. 11.1) using a second-order Volterra expansion. The adaptive model augments a linear tapped-delay regressor with all quadratic products (including squares) and performs an LMS-type update on the expanded coefficient vector.

Parameters

memory : int, optional Linear memory length L. The linear delay line is [x[k], x[k-1], ..., x[k-L+1]]. Default is 3. step : float or array_like of float, optional Step size mu. Can be either: - a scalar (same step for all coefficients), or - a vector with shape (n_coeffs,) for per-term step scaling. Default is 1e-2. w_init : array_like of float, optional Initial coefficient vector w(0) with shape (n_coeffs,). If None, initializes with zeros. safe_eps : float, optional Small positive constant kept for API consistency across the library. (Not used directly by this implementation.) Default is 1e-12.

Notes

Real-valued only This implementation is restricted to real-valued signals and coefficients (supports_complex=False). The constraint is enforced via @ensure_real_signals on optimize().

Volterra regressor (as implemented) Let the linear delay line be

$$x_{lin}[k] = [x[k], x[k-1], \ldots, x[k-L+1]]^T \in \mathbb{R}^{L}.$$

The second-order Volterra regressor is constructed as

$$u[k] = \begin{bmatrix} x_{lin}[k] \\ \mathrm{vec}\bigl(x_{lin}[k] x_{lin}^T[k]\bigr)_{i \le j} \end{bmatrix} \in \mathbb{R}^{n_{coeffs}},$$

where the quadratic block contains all products $x_{lin,i}[k]\, x_{lin,j}[k]$ for $0 \le i \le j \le L-1$ (unique terms only).

The number of coefficients is therefore

$$n_{coeffs} = L + \frac{L(L+1)}{2}.$$

LMS recursion (a priori) With

$$y[k] = w^T[k] u[k], \qquad e[k] = d[k] - y[k],$$

the update implemented here is

$$w[k+1] = w[k] + 2\mu\, e[k] \, u[k],$$

where ``\mu`` may be scalar or element-wise (vector step).

Implementation details - The coefficient vector self.w stores the full Volterra parameter vector (linear + quadratic) and is recorded by the base class. - The quadratic term ordering matches the nested loops used in _create_volterra_regressor() (i increasing, j from i to L-1).

References


VolterraLMS( memory: int = 3, step: Union[float, numpy.ndarray, list] = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None, *, safe_eps: float = 1e-12)
113    def __init__(
114        self,
115        memory: int = 3,
116        step: Union[float, np.ndarray, list] = 1e-2,
117        w_init: Optional[ArrayLike] = None,
118        *,
119        safe_eps: float = 1e-12,
120    ) -> None:
121        memory = int(memory)
122        if memory <= 0:
123            raise ValueError(f"memory must be > 0. Got {memory}.")
124
125        self.memory: int = memory
126        self.n_coeffs: int = memory + (memory * (memory + 1)) // 2
127        self._safe_eps: float = float(safe_eps)
128
129        super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)
130
131        if isinstance(step, (list, np.ndarray)):
132            step_vec = np.asarray(step, dtype=np.float64).reshape(-1)
133            if step_vec.size != self.n_coeffs:
134                raise ValueError(
135                    f"step vector must have length {self.n_coeffs}, got {step_vec.size}."
136                )
137            self.step: Union[float, np.ndarray] = step_vec
138        else:
139            self.step = float(step)
140
141        self.w = np.asarray(self.w, dtype=np.float64)
142
143        self.w_history = []
144        self._record_history()
supports_complex: bool = False
memory: int
n_coeffs: int
w
w_history
@ensure_real_signals
def optimize( self, input_signal: Union[numpy.ndarray, list], desired_signal: Union[numpy.ndarray, list], verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
178    @ensure_real_signals
179    def optimize(
180        self,
181        input_signal: Union[np.ndarray, list],
182        desired_signal: Union[np.ndarray, list],
183        verbose: bool = False,
184        return_internal_states: bool = False,
185    ) -> OptimizationResult:
186        """
187        Executes the Volterra LMS adaptation loop over paired input/desired sequences.
188
189        Parameters
190        ----------
191        input_signal : array_like of float
192            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
193        desired_signal : array_like of float
194            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
195        verbose : bool, optional
196            If True, prints the total runtime after completion.
197        return_internal_states : bool, optional
198            If True, includes the last internal states in ``result.extra``:
199            ``"last_regressor"``, ``"memory"``, and ``"n_coeffs"``.
200
201        Returns
202        -------
203        OptimizationResult
204            Result object with fields:
205            - outputs : ndarray of float, shape ``(N,)``
206                Scalar a priori output sequence, ``y[k] = w^T[k] u[k]``.
207            - errors : ndarray of float, shape ``(N,)``
208                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
209            - coefficients : ndarray of float
210                Volterra coefficient history recorded by the base class.
211            - error_type : str
212                Set to ``"a_priori"``.
213            - extra : dict, optional
214                Present only if ``return_internal_states=True``.
215        """
216        t0 = perf_counter()
217
218        x = np.asarray(input_signal, dtype=np.float64).ravel()
219        d = np.asarray(desired_signal, dtype=np.float64).ravel()
220
221        if x.size != d.size:
222            raise ValueError(f"Inconsistent lengths: input({x.size}) != desired({d.size})")
223        n_samples = int(x.size)
224
225        outputs = np.zeros(n_samples, dtype=np.float64)
226        errors = np.zeros(n_samples, dtype=np.float64)
227
228        L = int(self.memory)
229        x_padded = np.zeros(n_samples + (L - 1), dtype=np.float64)
230        x_padded[L - 1 :] = x
231
232        last_u: Optional[np.ndarray] = None
233
234        for k in range(n_samples):
235            x_lin = x_padded[k : k + L][::-1]
236            u = self._create_volterra_regressor(x_lin)
237            last_u = u
238
239            y_k = float(np.dot(self.w, u))
240            outputs[k] = y_k
241
242            e_k = float(d[k] - y_k)
243            errors[k] = e_k
244
245            if isinstance(self.step, np.ndarray):
246                self.w = self.w + (2.0 * self.step) * e_k * u
247            else:
248                self.w = self.w + (2.0 * float(self.step)) * e_k * u
249
250            self._record_history()
251
252        runtime_s = float(perf_counter() - t0)
253        if verbose:
254            print(f"[VolterraLMS] Completed in {runtime_s * 1000:.03f} ms")
255
256        extra: Optional[Dict[str, Any]] = None
257        if return_internal_states:
258            extra = {
259                "last_regressor": None if last_u is None else last_u.copy(),
260                "memory": int(self.memory),
261                "n_coeffs": int(self.n_coeffs),
262            }
263
264        return self._pack_results(
265            outputs=outputs,
266            errors= errors,
267            runtime_s=runtime_s,
268            error_type="a_priori",
269            extra=extra,
270        )

Executes the Volterra LMS adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of float Input sequence x[k] with shape (N,) (will be flattened). desired_signal : array_like of float Desired sequence d[k] with shape (N,) (will be flattened). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes the last internal states in result.extra: "last_regressor", "memory", and "n_coeffs".

Returns

OptimizationResult Result object with fields: - outputs : ndarray of float, shape (N,) Scalar a priori output sequence, y[k] = w^T[k] u[k]. - errors : ndarray of float, shape (N,) Scalar a priori error sequence, e[k] = d[k] - y[k]. - coefficients : ndarray of float Volterra coefficient history recorded by the base class. - error_type : str Set to "a_priori". - extra : dict, optional Present only if return_internal_states=True.

class VolterraRLS(pydaptivefiltering.AdaptiveFilter):
 29class VolterraRLS(AdaptiveFilter):
 30    """
 31    Second-order Volterra RLS adaptive filter (real-valued).
 32
 33    Volterra RLS (Diniz, Alg. 11.2) using a second-order Volterra expansion and
 34    an RLS update applied to the expanded regressor. The model augments a linear
 35    tapped-delay regressor with all unique quadratic products (including
 36    squares) and estimates the corresponding coefficient vector via RLS.
 37
 38    Parameters
 39    ----------
 40    memory : int, optional
 41        Linear memory length ``L``. The linear delay line is
 42        ``[x[k], x[k-1], ..., x[k-L+1]]``. Default is 3.
 43    forgetting_factor : float, optional
 44        Forgetting factor ``lambda`` with ``0 < lambda <= 1``. Default is 0.98.
 45    delta : float, optional
 46        Regularization parameter used to initialize the inverse correlation
 47        matrix as ``P(0) = I/delta`` (requires ``delta > 0``). Default is 1.0.
 48    w_init : array_like of float, optional
 49        Initial coefficient vector ``w(0)`` with shape ``(n_coeffs,)``. If None,
 50        initializes with zeros.
 51    safe_eps : float, optional
 52        Small positive constant used to guard denominators. Default is 1e-12.
 53
 54    Notes
 55    -----
 56    Real-valued only
 57        This implementation is restricted to real-valued signals and coefficients
 58        (``supports_complex=False``). The constraint is enforced via
 59        ``@ensure_real_signals`` on :meth:`optimize`.
 60
 61    Volterra regressor (as implemented)
 62        Let the linear delay line be
 63
 64        .. math::
 65            x_{lin}[k] = [x[k], x[k-1], \\ldots, x[k-L+1]]^T \\in \\mathbb{R}^{L}.
 66
 67        The second-order Volterra regressor is constructed as
 68
 69        .. math::
 70            u[k] =
 71            \\begin{bmatrix}
 72                x_{lin}[k] \\\\
 73                \\mathrm{vec}\\bigl(x_{lin}[k] x_{lin}^T[k]\\bigr)_{i \\le j}
 74            \\end{bmatrix}
 75            \\in \\mathbb{R}^{n_{coeffs}},
 76
 77        where the quadratic block contains all products ``x_{lin,i}[k] x_{lin,j}[k]``
 78        for ``0 \\le i \\le j \\le L-1``.
 79
 80        The number of coefficients is
 81
 82        .. math::
 83            n_{coeffs} = L + \\frac{L(L+1)}{2}.
 84
 85    RLS recursion (a priori form)
 86        With
 87
 88        .. math::
 89            y[k] = w^T[k-1] u[k], \\qquad e[k] = d[k] - y[k],
 90
 91        define the gain
 92
 93        .. math::
 94            g[k] = \\frac{P[k-1] u[k]}{\\lambda + u^T[k] P[k-1] u[k]},
 95
 96        the inverse correlation update
 97
 98        .. math::
 99            P[k] = \\frac{1}{\\lambda}\\left(P[k-1] - g[k] u^T[k] P[k-1]\\right),
100
101        and the coefficient update
102
103        .. math::
104            w[k] = w[k-1] + g[k] e[k].
105
106    A posteriori quantities
107        If requested, this implementation also computes the *a posteriori*
108        output/error after updating the coefficients at time ``k``:
109
110        .. math::
111            y^{post}[k] = w^T[k] u[k], \\qquad e^{post}[k] = d[k] - y^{post}[k].
112
113    Implementation details
114        - The denominator ``lambda + u^T P u`` is guarded by ``safe_eps`` to avoid
115          numerical issues when very small.
116        - Coefficient history is recorded via the base class.
117        - The quadratic-term ordering matches :meth:`_create_volterra_regressor`.
118
119    References
120    ----------
121    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
122       Implementation*, 5th ed., Algorithm 11.2.
123    """
124
125    supports_complex: bool = False
126
127    def __init__(
128        self,
129        memory: int = 3,
130        forgetting_factor: float = 0.98,
131        delta: float = 1.0,
132        w_init: Optional[ArrayLike] = None,
133        *,
134        safe_eps: float = 1e-12,
135    ) -> None:
136        """
137        Parameters
138        ----------
139        memory:
140            Linear memory length L. Determines number of Volterra coefficients:
141            n_coeffs = L + L(L+1)/2.
142        forgetting_factor:
143            Forgetting factor λ (typically close to 1). Must satisfy 0 < λ <= 1.
144        delta:
145            Positive regularization for initializing the inverse correlation matrix:
146            P[0] = I / delta.
147        w_init:
148            Optional initial coefficient vector (length n_coeffs). If None, zeros.
149        safe_eps:
150            Small epsilon to guard denominators.
151        """
152        memory = int(memory)
153        if memory <= 0:
154            raise ValueError(f"memory must be > 0. Got {memory}.")
155
156        lam = float(forgetting_factor)
157        if not (0.0 < lam <= 1.0):
158            raise ValueError(f"forgetting_factor must satisfy 0 < λ <= 1. Got λ={lam}.")
159
160        delta = float(delta)
161        if delta <= 0.0:
162            raise ValueError(f"delta must be > 0. Got delta={delta}.")
163
164        self.memory: int = memory
165        self.lam: float = lam
166        self._safe_eps: float = float(safe_eps)
167
168        self.n_coeffs: int = memory + (memory * (memory + 1)) // 2
169
170        super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)
171
172        self.w = np.asarray(self.w, dtype=np.float64)
173
174        if w_init is not None:
175            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
176            if w0.size != self.n_coeffs:
177                raise ValueError(
178                    f"w_init must have length {self.n_coeffs}, got {w0.size}."
179                )
180            self.w = w0.copy()
181
182        self.P: np.ndarray = (np.eye(self.n_coeffs, dtype=np.float64) / delta)
183
184        self.w_history = []
185        self._record_history()
186
187    def _create_volterra_regressor(self, x_lin: np.ndarray) -> np.ndarray:
188        """
189        Constructs the second-order Volterra regressor from a linear delay line.
190
191        Parameters
192        ----------
193        x_lin : ndarray of float
194            Linear delay line with shape ``(L,)`` ordered as
195            ``[x[k], x[k-1], ..., x[k-L+1]]``.
196
197        Returns
198        -------
199        ndarray of float
200            Volterra regressor ``u[k]`` with shape ``(n_coeffs,)`` containing:
201            - linear terms, followed by
202            - quadratic terms for ``i <= j``.
203        """
204        x_lin = np.asarray(x_lin, dtype=np.float64).reshape(-1)
205        if x_lin.size != self.memory:
206            raise ValueError(f"x_lin must have length {self.memory}, got {x_lin.size}.")
207
208        quad = np.empty((self.memory * (self.memory + 1)) // 2, dtype=np.float64)
209        idx = 0
210        for i in range(self.memory):
211            for j in range(i, self.memory):
212                quad[idx] = x_lin[i] * x_lin[j]
213                idx += 1
214
215        return np.concatenate([x_lin, quad], axis=0)
216
217    @ensure_real_signals
218    @validate_input
219    def optimize(
220        self,
221        input_signal: np.ndarray,
222        desired_signal: np.ndarray,
223        verbose: bool = False,
224        return_internal_states: bool = False,
225    ) -> OptimizationResult:
226        """
227        Executes the Volterra RLS adaptation loop over paired input/desired sequences.
228
229        Parameters
230        ----------
231        input_signal : array_like of float
232            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
233        desired_signal : array_like of float
234            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
235        verbose : bool, optional
236            If True, prints the total runtime after completion.
237        return_internal_states : bool, optional
238            If True, includes additional internal sequences in ``result.extra``,
239            including a posteriori output/error and last gain/denominator.
240
241        Returns
242        -------
243        OptimizationResult
244            Result object with fields:
245            - outputs : ndarray of float, shape ``(N,)``
246                Scalar a priori output sequence, ``y[k] = w^T[k-1] u[k]``.
247            - errors : ndarray of float, shape ``(N,)``
248                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
249            - coefficients : ndarray of float
250                Volterra coefficient history recorded by the base class.
251            - error_type : str
252                Set to ``"a_priori"``.
253            - extra : dict, optional
254                Present only if ``return_internal_states=True`` with:
255                - ``posteriori_outputs`` : ndarray of float
256                    A posteriori output sequence ``y^{post}[k]``.
257                - ``posteriori_errors`` : ndarray of float
258                    A posteriori error sequence ``e^{post}[k]``.
259                - ``last_gain`` : ndarray of float
260                    Last RLS gain vector ``g[k]``.
261                - ``last_den`` : float
262                    Last gain denominator ``lambda + u^T P u``.
263                - ``last_regressor`` : ndarray of float
264                    Last Volterra regressor ``u[k]``.
265                - ``memory`` : int
266                    Linear memory length ``L``.
267                - ``n_coeffs`` : int
268                    Number of Volterra coefficients.
269                - ``forgetting_factor`` : float
270                    The forgetting factor ``lambda`` used.
271        """
272        t0 = perf_counter()
273
274        x = np.asarray(input_signal, dtype=np.float64).ravel()
275        d = np.asarray(desired_signal, dtype=np.float64).ravel()
276
277        n_samples = int(x.size)
278
279        outputs = np.zeros(n_samples, dtype=np.float64)
280        errors = np.zeros(n_samples, dtype=np.float64)
281
282        y_post = np.zeros(n_samples, dtype=np.float64)
283        e_post = np.zeros(n_samples, dtype=np.float64)
284
285        L = int(self.memory)
286        x_padded = np.zeros(n_samples + (L - 1), dtype=np.float64)
287        x_padded[L - 1 :] = x
288
289        last_k: Optional[np.ndarray] = None
290        last_den: Optional[float] = None
291        last_u: Optional[np.ndarray] = None
292
293        for k in range(n_samples):
294            x_lin = x_padded[k : k + L][::-1]
295            u = self._create_volterra_regressor(x_lin)
296            last_u = u
297
298            y_k = float(np.dot(self.w, u))
299            e_k = float(d[k] - y_k)
300            outputs[k] = y_k
301            errors[k] = e_k
302
303            Pu = self.P @ u
304            den = float(self.lam + np.dot(u, Pu))
305            if abs(den) < self._safe_eps:
306                den = float(den + np.sign(den) * self._safe_eps) if den != 0.0 else float(self._safe_eps)
307
308            k_gain = Pu / den
309            last_k = k_gain
310            last_den = den
311
312            self.w = self.w + k_gain * e_k
313
314            self.P = (self.P - np.outer(k_gain, Pu)) / self.lam
315
316            yk_post = float(np.dot(self.w, u))
317            ek_post = float(d[k] - yk_post)
318            y_post[k] = yk_post
319            e_post[k] = ek_post
320
321            self._record_history()
322
323        runtime_s = float(perf_counter() - t0)
324        if verbose:
325            print(f"[VolterraRLS] Completed in {runtime_s * 1000:.03f} ms")
326
327        extra: Optional[Dict[str, Any]] = None
328        if return_internal_states:
329            extra = {
330                "posteriori_outputs": y_post,
331                "posteriori_errors": e_post,
332                "last_gain": None if last_k is None else last_k.copy(),
333                "last_den": last_den,
334                "last_regressor": None if last_u is None else last_u.copy(),
335                "memory": int(self.memory),
336                "n_coeffs": int(self.n_coeffs),
337                "forgetting_factor": float(self.lam),
338            }
339
340        return self._pack_results(
341            outputs=outputs,
342            errors=errors,
343            runtime_s=runtime_s,
344            error_type="a_priori",
345            extra=extra,
346        )

Second-order Volterra RLS adaptive filter (real-valued).

Volterra RLS (Diniz, Alg. 11.2) using a second-order Volterra expansion and an RLS update applied to the expanded regressor. The model augments a linear tapped-delay regressor with all unique quadratic products (including squares) and estimates the corresponding coefficient vector via RLS.

Parameters

memory : int, optional Linear memory length L. The linear delay line is [x[k], x[k-1], ..., x[k-L+1]]. Default is 3. forgetting_factor : float, optional Forgetting factor lambda with 0 < lambda <= 1. Default is 0.98. delta : float, optional Regularization parameter used to initialize the inverse correlation matrix as P(0) = I/delta (requires delta > 0). Default is 1.0. w_init : array_like of float, optional Initial coefficient vector w(0) with shape (n_coeffs,). If None, initializes with zeros. safe_eps : float, optional Small positive constant used to guard denominators. Default is 1e-12.

Notes

Real-valued only This implementation is restricted to real-valued signals and coefficients (supports_complex=False). The constraint is enforced via @ensure_real_signals on optimize().

Volterra regressor (as implemented) Let the linear delay line be

$$x_{lin}[k] = [x[k], x[k-1], \ldots, x[k-L+1]]^T \in \mathbb{R}^{L}.$$

The second-order Volterra regressor is constructed as

$$u[k] = \begin{bmatrix} x_{lin}[k] \\ \mathrm{vec}\bigl(x_{lin}[k] x_{lin}^T[k]\bigr)_{i \le j} \end{bmatrix} \in \mathbb{R}^{n_{coeffs}},$$

where the quadratic block contains all products $x_{lin,i}[k]\, x_{lin,j}[k]$ for $0 \le i \le j \le L-1$.

The number of coefficients is

$$n_{coeffs} = L + \frac{L(L+1)}{2}.$$

RLS recursion (a priori form) With

$$y[k] = w^T[k-1] u[k], \qquad e[k] = d[k] - y[k],$$

define the gain

$$g[k] = \frac{P[k-1] u[k]}{\lambda + u^T[k] P[k-1] u[k]},$$

the inverse correlation update

$$P[k] = \frac{1}{\lambda}\left(P[k-1] - g[k] u^T[k] P[k-1]\right),$$

and the coefficient update

$$w[k] = w[k-1] + g[k] e[k].$$

A posteriori quantities If requested, this implementation also computes the a posteriori output/error after updating the coefficients at time k:

$$y^{post}[k] = w^T[k] u[k], \qquad e^{post}[k] = d[k] - y^{post}[k].$$

Implementation details - The denominator lambda + u^T P u is guarded by safe_eps to avoid numerical issues when very small. - Coefficient history is recorded via the base class. - The quadratic-term ordering matches _create_volterra_regressor().

References


VolterraRLS( memory: int = 3, forgetting_factor: float = 0.98, delta: float = 1.0, w_init: Union[numpy.ndarray, list, NoneType] = None, *, safe_eps: float = 1e-12)
127    def __init__(
128        self,
129        memory: int = 3,
130        forgetting_factor: float = 0.98,
131        delta: float = 1.0,
132        w_init: Optional[ArrayLike] = None,
133        *,
134        safe_eps: float = 1e-12,
135    ) -> None:
136        """
137        Parameters
138        ----------
139        memory:
140            Linear memory length L. Determines number of Volterra coefficients:
141            n_coeffs = L + L(L+1)/2.
142        forgetting_factor:
143            Forgetting factor λ (typically close to 1). Must satisfy 0 < λ <= 1.
144        delta:
145            Positive regularization for initializing the inverse correlation matrix:
146            P[0] = I / delta.
147        w_init:
148            Optional initial coefficient vector (length n_coeffs). If None, zeros.
149        safe_eps:
150            Small epsilon to guard denominators.
151        """
152        memory = int(memory)
153        if memory <= 0:
154            raise ValueError(f"memory must be > 0. Got {memory}.")
155
156        lam = float(forgetting_factor)
157        if not (0.0 < lam <= 1.0):
158            raise ValueError(f"forgetting_factor must satisfy 0 < λ <= 1. Got λ={lam}.")
159
160        delta = float(delta)
161        if delta <= 0.0:
162            raise ValueError(f"delta must be > 0. Got delta={delta}.")
163
164        self.memory: int = memory
165        self.lam: float = lam
166        self._safe_eps: float = float(safe_eps)
167
168        self.n_coeffs: int = memory + (memory * (memory + 1)) // 2
169
170        super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)
171
172        self.w = np.asarray(self.w, dtype=np.float64)
173
174        if w_init is not None:
175            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
176            if w0.size != self.n_coeffs:
177                raise ValueError(
178                    f"w_init must have length {self.n_coeffs}, got {w0.size}."
179                )
180            self.w = w0.copy()
181
182        self.P: np.ndarray = (np.eye(self.n_coeffs, dtype=np.float64) / delta)
183
184        self.w_history = []
185        self._record_history()

Parameters

memory: Linear memory length L. Determines number of Volterra coefficients: n_coeffs = L + L(L+1)/2. forgetting_factor: Forgetting factor λ (typically close to 1). Must satisfy 0 < λ <= 1. delta: Positive regularization for initializing the inverse correlation matrix: P[0] = I / delta. w_init: Optional initial coefficient vector (length n_coeffs). If None, zeros. safe_eps: Small epsilon to guard denominators.

supports_complex: bool = False
memory: int
lam: float
n_coeffs: int
w
P: numpy.ndarray
w_history
@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
217    @ensure_real_signals
218    @validate_input
219    def optimize(
220        self,
221        input_signal: np.ndarray,
222        desired_signal: np.ndarray,
223        verbose: bool = False,
224        return_internal_states: bool = False,
225    ) -> OptimizationResult:
226        """
227        Executes the Volterra RLS adaptation loop over paired input/desired sequences.
228
229        Parameters
230        ----------
231        input_signal : array_like of float
232            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
233        desired_signal : array_like of float
234            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
235        verbose : bool, optional
236            If True, prints the total runtime after completion.
237        return_internal_states : bool, optional
238            If True, includes additional internal sequences in ``result.extra``,
239            including a posteriori output/error and last gain/denominator.
240
241        Returns
242        -------
243        OptimizationResult
244            Result object with fields:
245            - outputs : ndarray of float, shape ``(N,)``
246                Scalar a priori output sequence, ``y[k] = w^T[k-1] u[k]``.
247            - errors : ndarray of float, shape ``(N,)``
248                Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
249            - coefficients : ndarray of float
250                Volterra coefficient history recorded by the base class.
251            - error_type : str
252                Set to ``"a_priori"``.
253            - extra : dict, optional
254                Present only if ``return_internal_states=True`` with:
255                - ``posteriori_outputs`` : ndarray of float
256                    A posteriori output sequence ``y^{post}[k]``.
257                - ``posteriori_errors`` : ndarray of float
258                    A posteriori error sequence ``e^{post}[k]``.
259                - ``last_gain`` : ndarray of float
260                    Last RLS gain vector ``g[k]``.
261                - ``last_den`` : float
262                    Last gain denominator ``lambda + u^T P u``.
263                - ``last_regressor`` : ndarray of float
264                    Last Volterra regressor ``u[k]``.
265                - ``memory`` : int
266                    Linear memory length ``L``.
267                - ``n_coeffs`` : int
268                    Number of Volterra coefficients.
269                - ``forgetting_factor`` : float
270                    The forgetting factor ``lambda`` used.
271        """
272        t0 = perf_counter()
273
274        x = np.asarray(input_signal, dtype=np.float64).ravel()
275        d = np.asarray(desired_signal, dtype=np.float64).ravel()
276
277        n_samples = int(x.size)
278
279        outputs = np.zeros(n_samples, dtype=np.float64)
280        errors = np.zeros(n_samples, dtype=np.float64)
281
282        y_post = np.zeros(n_samples, dtype=np.float64)
283        e_post = np.zeros(n_samples, dtype=np.float64)
284
285        L = int(self.memory)
           # Prepend L-1 zeros so the earliest regressors see zeros for pre-start samples.
286        x_padded = np.zeros(n_samples + (L - 1), dtype=np.float64)
287        x_padded[L - 1 :] = x
288
289        last_k: Optional[np.ndarray] = None
290        last_den: Optional[float] = None
291        last_u: Optional[np.ndarray] = None
292
293        for k in range(n_samples):
               # Newest-first linear slice: [x[k], x[k-1], ..., x[k-L+1]].
294            x_lin = x_padded[k : k + L][::-1]
295            u = self._create_volterra_regressor(x_lin)
296            last_u = u
297
               # A priori output/error use the coefficients from the previous iteration.
298            y_k = float(np.dot(self.w, u))
299            e_k = float(d[k] - y_k)
300            outputs[k] = y_k
301            errors[k] = e_k
302
               # RLS gain g = P u / (lambda + u^T P u); denominator guarded against underflow.
303            Pu = self.P @ u
304            den = float(self.lam + np.dot(u, Pu))
305            if abs(den) < self._safe_eps:
306                den = float(den + np.sign(den) * self._safe_eps) if den != 0.0 else float(self._safe_eps)
307
308            k_gain = Pu / den
309            last_k = k_gain
310            last_den = den
311
312            self.w = self.w + k_gain * e_k
313
               # Matrix-inversion-lemma (Riccati) update of the inverse correlation matrix P.
314            self.P = (self.P - np.outer(k_gain, Pu)) / self.lam
315
               # A posteriori quantities recomputed with the just-updated coefficients.
316            yk_post = float(np.dot(self.w, u))
317            ek_post = float(d[k] - yk_post)
318            y_post[k] = yk_post
319            e_post[k] = ek_post
320
321            self._record_history()
322
323        runtime_s = float(perf_counter() - t0)
324        if verbose:
325            print(f"[VolterraRLS] Completed in {runtime_s * 1000:.03f} ms")
326
327        extra: Optional[Dict[str, Any]] = None
328        if return_internal_states:
329            extra = {
330                "posteriori_outputs": y_post,
331                "posteriori_errors": e_post,
332                "last_gain": None if last_k is None else last_k.copy(),
333                "last_den": last_den,
334                "last_regressor": None if last_u is None else last_u.copy(),
335                "memory": int(self.memory),
336                "n_coeffs": int(self.n_coeffs),
337                "forgetting_factor": float(self.lam),
338            }
339
340        return self._pack_results(
341            outputs=outputs,
342            errors=errors,
343            runtime_s=runtime_s,
344            error_type="a_priori",
345            extra=extra,
346        )

Executes the Volterra RLS adaptation loop over paired input/desired sequences.

Parameters

input_signal : array_like of float — input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float — desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional — if True, prints the total runtime after completion.
return_internal_states : bool, optional — if True, includes additional internal sequences in result.extra, including the a posteriori output/error and the last gain/denominator.

Returns

OptimizationResult — result object with fields:

  • outputs : ndarray of float, shape (N,) — scalar a priori output sequence, y[k] = w^T[k-1] u[k].
  • errors : ndarray of float, shape (N,) — scalar a priori error sequence, e[k] = d[k] - y[k].
  • coefficients : ndarray of float — Volterra coefficient history recorded by the base class.
  • error_type : str — set to "a_priori".
  • extra : dict, optional — present only if return_internal_states=True, with:
      • posteriori_outputs : ndarray of float — a posteriori output sequence y^{post}[k].
      • posteriori_errors : ndarray of float — a posteriori error sequence e^{post}[k].
      • last_gain : ndarray of float — last RLS gain vector g[k].
      • last_den : float — last gain denominator lambda + u^T P u.
      • last_regressor : ndarray of float — last Volterra regressor u[k].
      • memory : int — linear memory length L.
      • n_coeffs : int — number of Volterra coefficients.
      • forgetting_factor : float — the forgetting factor lambda used.

class CFDLMS(pydaptivefiltering.AdaptiveFilter):
 27class CFDLMS(AdaptiveFilter):
 28    """
 29    Constrained Frequency-Domain LMS (CFDLMS) for real-valued signals (block adaptive).
 30
 31    Implements the Constrained Frequency-Domain LMS algorithm (Algorithm 12.4, Diniz)
 32    for identifying/estimating a real-valued FIR system in a block-wise frequency-domain
 33    framework with a time-domain constraint (to control circular convolution / enforce
 34    effective FIR support).
 35
 36    Block structure and main variables
 37    ----------------------------------
 38    Let:
 39        - M: number of subbands / FFT size (also the block length in frequency domain),
 40        - L: decimation / number of fresh time samples per iteration (block advance),
 41        - Nw: time-support (per subband) of the adaptive filters, so each subband filter
 42              has length (Nw+1) in the *time-lag* axis (columns of `ww`).
 43
 44    Internal coefficient representation
 45    -----------------------------------
 46    The adaptive parameters are stored as a complex matrix:
 47
 48        ww  in C^{M x (Nw+1)}
 49
 50    where each row corresponds to one frequency bin (subband), and each column is a
 51    delay-tap in the *block* (overlap) dimension.
 52
 53    For compatibility with the base API:
 54        - `self.w` stores a flattened real view of `ww` (real part only),
 55        - `OptimizationResult.coefficients` comes from the base `w_history` (flattened),
 56        - the full matrix trajectory is returned in `result.extra["ww_history"]`.
 57
 58    Signal processing conventions (as implemented)
 59    ----------------------------------------------
 60    Per iteration k (block index):
 61    - Build an M-length time vector from the most recent input segment (reversed):
 62          x_p = [x[kL+M-1], ..., x[kL]]^T
 63      then compute a *unitary* FFT:
 64          ui = FFT(x_p) / sqrt(M)
 65
 66    - Maintain a regressor matrix `uu` with shape (M, Nw+1) containing the most recent
 67      Nw+1 frequency-domain regressors (columns shift right each iteration).
 68
 69    - Compute frequency-domain output per bin:
 70          uy = sum_j uu[:, j] * ww[:, j]
 71      and return to time domain:
 72          y_block = IFFT(uy) * sqrt(M)
 73
 74      Only the first L samples are used as the “valid” output of this block.
 75
 76    Error, energy smoothing, and update
 77    -----------------------------------
 78    The algorithm forms an L-length error (in the reversed time order used internally),
 79    zero-pads it to length M, and FFTs it (unitary) to obtain `et`.
 80
 81    A smoothed energy estimate per bin is kept:
 82        sig[k] = (1-a) sig[k-1] + a |ui|^2
 83    where `a = smoothing`.
 84
 85    The normalized per-bin step is:
 86        gain = step / (gamma + (Nw+1) * sig)
 87
 88    A preliminary frequency-domain correction is built:
 89        wwc = gain[:,None] * conj(uu) * et[:,None]
 90
 91    Constrained / time-domain projection
 92    ------------------------------------
 93    The “constraint” is applied by transforming wwc along axis=0 (FFT across bins),
 94    zeroing time indices >= L (i.e., enforcing an L-sample time support),
 95    and transforming back (IFFT). This is the standard “constrained” step that reduces
 96    circular-convolution artifacts.
 97
 98    Returned sequences
 99    ------------------
100    - `outputs`: real-valued estimated output, length = n_iters * L
101    - `errors`:  real-valued output error (d - y), same length as outputs
102    - `error_type="output_error"` (block output error, not a priori scalar error)
103
104    Parameters
105    ----------
106    filter_order : int, default=5
107        Subband filter order Nw (number of taps is Nw+1 along the overlap dimension).
108    n_subbands : int, default=64
109        FFT size M (number of subbands / frequency bins).
110    decimation : int, optional
111        Block advance L (samples per iteration). If None, defaults to M//2.
112    step : float, default=0.1
113        Global step size (mu).
114    gamma : float, default=1e-2
115        Regularization constant in the normalization denominator (>0).
116    smoothing : float, default=0.01
117        Exponential smoothing factor a in (0,1].
118    w_init : array_like, optional
119        Initial coefficients. Can be either:
120        - matrix shape (M, Nw+1), or
121        - flat length M*(Nw+1), reshaped internally.
122
123    Notes
124    -----
125    - Real-valued interface: input_signal and desired_signal are enforced real.
126      Internally complex arithmetic is used due to FFT processing.
127    - This is a block algorithm: one iteration produces L output samples.
128    """
129    supports_complex: bool = False
130
131    M: int
132    L: int
133    Nw: int
134    step: float
135    gamma: float
136    smoothing: float
137
138    def __init__(
139        self,
140        filter_order: int = 5,
141        n_subbands: int = 64,
142        decimation: Optional[int] = None,
143        step: float = 0.1,
144        gamma: float = 1e-2,
145        smoothing: float = 0.01,
146        w_init: Optional[Union[np.ndarray, list]] = None,
147    ) -> None:
148        if n_subbands <= 0:
149            raise ValueError("n_subbands (M) must be a positive integer.")
150        if filter_order < 0:
151            raise ValueError("filter_order (Nw) must be >= 0.")
152        if decimation is None:
153            decimation = n_subbands // 2
154        if decimation <= 0 or decimation > n_subbands:
155            raise ValueError("decimation (L) must satisfy 1 <= L <= M.")
156        if gamma <= 0:
157            raise ValueError("gamma must be > 0.")
158        if not (0.0 < smoothing <= 1.0):
159            raise ValueError("smoothing must be in (0, 1].")
160
161        self.M = int(n_subbands)
162        self.L = int(decimation)
163        self.Nw = int(filter_order)
164
165        self.step = float(step)
166        self.gamma = float(gamma)
167        self.smoothing = float(smoothing)
168
           # The base class tracks a flat real parameter vector of length M*(Nw+1).
169        n_params = self.M * (self.Nw + 1)
170        super().__init__(filter_order=n_params - 1, w_init=None)
171
172        self.ww: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
173        if w_init is not None:
174            w0 = np.asarray(w_init)
175            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
176                self.ww = w0.astype(np.complex128, copy=True)
177            else:
178                w0 = w0.reshape(-1)
179                if w0.size != n_params:
180                    raise ValueError(
181                        f"w_init has incompatible size. Expected {n_params} "
182                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
183                    )
184                self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True)
185
186        self.uu: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
187        self.sig: np.ndarray = np.zeros(self.M, dtype=np.float64)
188
           # Flattened view for the base API; the complex->float cast keeps the real part only.
189        self.w = self.ww.reshape(-1).astype(float, copy=False)
190        self.w_history = []
191        self._record_history()
192
193        self.ww_history: list[np.ndarray] = []
194
195    def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None:
196        """
197        Reset coefficients/history.
198
199        If w_new is:
200          - None: zeros
201          - shape (M, Nw+1): used directly
202          - flat of length M*(Nw+1): reshaped
203        """
204        n_params = self.M * (self.Nw + 1)
205
206        if w_new is None:
207            self.ww = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
208        else:
209            w0 = np.asarray(w_new)
210            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
211                self.ww = w0.astype(np.complex128, copy=True)
212            else:
213                w0 = w0.reshape(-1)
214                if w0.size != n_params:
215                    raise ValueError(
216                        f"w_new has incompatible size. Expected {n_params} "
217                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
218                    )
219                self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True)
220
           # Clear the regressor delay line and the smoothed per-bin energy estimate.
221        self.uu = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
222        self.sig = np.zeros(self.M, dtype=np.float64)
223
224        self.ww_history = []
225        self.w = self.ww.reshape(-1).astype(float, copy=False)
226        self.w_history = []
227        self._record_history()
228
229    @ensure_real_signals
230    @validate_input
231    def optimize(
232        self,
233        input_signal: np.ndarray,
234        desired_signal: np.ndarray,
235        verbose: bool = False,
236        return_internal_states: bool = False,
237    ) -> OptimizationResult:
238        """
239        Run CFDLMS adaptation over real-valued (x[n], d[n]) in blocks.
240
241        Parameters
242        ----------
243        input_signal : array_like of float
244            Input sequence x[n], shape (N,).
245        desired_signal : array_like of float
246            Desired sequence d[n], shape (N,).
247        verbose : bool, default=False
248            If True, prints runtime and basic iteration stats.
249        return_internal_states : bool, default=False
250            If True, includes additional internal trajectories in result.extra.
251
252        Returns
253        -------
254        OptimizationResult
255            outputs : ndarray of float, shape (n_iters * L,)
256                Concatenated block outputs (L per iteration).
257            errors : ndarray of float, shape (n_iters * L,)
258                Output error sequence e[n] = d[n] - y[n].
259            coefficients : ndarray
260                Flattened coefficient history (from base class; real part of ww).
261            error_type : str
262                "output_error".
263            extra : dict
264                Always contains:
265                    - "ww_history": list of ndarray, each shape (M, Nw+1)
266                    - "n_iters": int
267                If return_internal_states=True, also contains:
268                    - "sig": ndarray, shape (M,) final smoothed per-bin energy
269                    - "sig_history": ndarray, shape (n_iters, M)
270        """
271        tic: float = time()
272
273        x = np.asarray(input_signal, dtype=np.float64).ravel()
274        d = np.asarray(desired_signal, dtype=np.float64).ravel()
275
276        M = self.M
277        L = self.L
278        Nw = self.Nw
279
           # Number of complete blocks: iteration k consumes xpad[kL : kL+M] and d[kL : kL+L],
           # with len(xpad) = N + L (zero-padded below).
280        max_iters_from_x = int(np.floor((x.size + L - M) / L) + 1) if (x.size + L) >= M else 0
281        max_iters_from_d = int(d.size // L)
282        n_iters = max(0, min(max_iters_from_x, max_iters_from_d))
283
284        out_len = n_iters * L
285        outputs = np.zeros(out_len, dtype=np.float64)
286        errors  = np.zeros(out_len, dtype=np.float64)
287
           # Prepend L zeros so the first block's window has zero pre-history.
288        xpad = np.concatenate([np.zeros(L, dtype=np.float64), x])
289
290        self.ww_history = []
291
292        sig_hist: Optional[np.ndarray] = np.zeros((n_iters, M), dtype=np.float64) if return_internal_states else None
293
294        uu = self.uu
295        ww = self.ww
296        sig = self.sig
297
298        a = self.smoothing
299        u_step = self.step
300        gamma = self.gamma
301        sqrtM = np.sqrt(M)
302
303        for k in range(n_iters):
304            start = k * L
305            seg_x = xpad[start : start + M]
306
307            x_p = seg_x[::-1].astype(np.complex128, copy=False)
308
309            d_seg = d[start : start + L]
310            d_p = d_seg[::-1].astype(np.complex128, copy=False)
311
312            ui = np.fft.fft(x_p) / sqrtM
313
               # Shift the block-delay columns right and insert the newest regressor.
314            uu[:, 1:] = uu[:, :-1]
315            uu[:, 0] = ui
316
317            uy = np.sum(uu * ww, axis=1)
318
319            y_block = np.fft.ifft(uy) * sqrtM
320            y_firstL = y_block[:L]
321
322            e_rev = d_p - y_firstL
323
324            y_time = np.real(y_firstL[::-1])
325            e_time = d_seg - y_time
326
327            outputs[start : start + L] = y_time
328            errors[start : start + L] = e_time
329
               # Unitary FFT of the zero-padded block error (still in reversed time order).
330            e_pad = np.concatenate([e_rev, np.zeros(M - L, dtype=np.complex128)])
331            et = np.fft.fft(e_pad) / sqrtM
332            sig[:] = (1.0 - a) * sig + a * (np.abs(ui) ** 2)
333
334            denom = gamma + (Nw + 1) * sig
335            gain = u_step / denom
336
337            wwc = (gain[:, None] * np.conj(uu) * et[:, None]).astype(np.complex128, copy=False)
338
               # Constrained step: zero time indices >= L across the bin axis, limiting
               # the effective support to reduce circular-convolution artifacts.
339            waux = np.fft.fft(wwc, axis=0) / sqrtM
340            waux[L:, :] = 0.0
341            wwc_c = np.fft.ifft(waux, axis=0) * sqrtM
342
343            ww = ww + wwc_c
344
345            self.ww_history.append(ww.copy())
346
347            self.w = np.real(ww.reshape(-1)).astype(float, copy=False)
348            self._record_history()
349
350            if return_internal_states and sig_hist is not None:
351                sig_hist[k, :] = sig
352
353        self.uu = uu
354        self.ww = ww
355        self.sig = sig
356
357        runtime_s: float = float(time() - tic)
358        if verbose:
359            print(f"[CFDLMS] Completed in {runtime_s * 1000:.03f} ms | iters={n_iters} | out_len={out_len}")
360
361        extra: Dict[str, Any] = {
362            "ww_history": self.ww_history,
363            "n_iters": int(n_iters),
364        }
365        if return_internal_states:
366            extra["sig"] = sig.copy()
367            extra["sig_history"] = sig_hist
368
369        return self._pack_results(
370            outputs=outputs,
371            errors=errors,
372            runtime_s=runtime_s,
373            error_type="output_error",
374            extra=extra,
375        )

Constrained Frequency-Domain LMS (CFDLMS) for real-valued signals (block adaptive).

Implements the Constrained Frequency-Domain LMS algorithm (Algorithm 12.4, Diniz) for identifying/estimating a real-valued FIR system in a block-wise frequency-domain framework with a time-domain constraint (to control circular convolution / enforce effective FIR support).

Block structure and main variables

Let: - M: number of subbands / FFT size (also the block length in frequency domain), - L: decimation / number of fresh time samples per iteration (block advance), - Nw: time-support (per subband) of the adaptive filters, so each subband filter has length (Nw+1) in the time-lag axis (columns of ww).

Internal coefficient representation

The adaptive parameters are stored as a complex matrix:

ww  in C^{M x (Nw+1)}

where each row corresponds to one frequency bin (subband), and each column is a delay-tap in the block (overlap) dimension.

For compatibility with the base API: - self.w stores a flattened real view of ww (real part only), - OptimizationResult.coefficients comes from the base w_history (flattened), - the full matrix trajectory is returned in result.extra["ww_history"].

Signal processing conventions (as implemented)

Per iteration k (block index):

  • Build an M-length time vector from the most recent input segment (reversed): x_p = [x[kL+M-1], ..., x[kL]]^T, then compute a unitary FFT: ui = FFT(x_p) / sqrt(M)
  • Maintain a regressor matrix uu with shape (M, Nw+1) containing the most recent Nw+1 frequency-domain regressors (columns shift right each iteration).

  • Compute frequency-domain output per bin: uy = sum_j uu[:, j] * ww[:, j] and return to time domain: y_block = IFFT(uy) * sqrt(M)

    Only the first L samples are used as the “valid” output of this block.

Error, energy smoothing, and update

The algorithm forms an L-length error (in the reversed time order used internally), zero-pads it to length M, and FFTs it (unitary) to obtain et.

A smoothed energy estimate per bin is kept: sig[k] = (1-a) sig[k-1] + a |ui|^2 where a = smoothing.

The normalized per-bin step is: gain = step / (gamma + (Nw+1) * sig)

A preliminary frequency-domain correction is built: wwc = gain[:,None] * conj(uu) * et[:,None]

Constrained / time-domain projection

The “constraint” is applied by transforming wwc along axis=0 (FFT across bins), zeroing time indices >= L (i.e., enforcing an L-sample time support), and transforming back (IFFT). This is the standard “constrained” step that reduces circular-convolution artifacts.

Returned sequences

  • outputs: real-valued estimated output, length = n_iters * L
  • errors: real-valued output error (d - y), same length as outputs
  • error_type="output_error" (block output error, not a priori scalar error)

Parameters

filter_order : int, default=5 — subband filter order Nw (number of taps is Nw+1 along the overlap dimension).
n_subbands : int, default=64 — FFT size M (number of subbands / frequency bins).
decimation : int, optional — block advance L (samples per iteration); if None, defaults to M//2.
step : float, default=0.1 — global step size (mu).
gamma : float, default=1e-2 — regularization constant in the normalization denominator (>0).
smoothing : float, default=0.01 — exponential smoothing factor a in (0, 1].
w_init : array_like, optional — initial coefficients; either a matrix of shape (M, Nw+1) or a flat vector of length M*(Nw+1) (reshaped internally).

Notes

  • Real-valued interface: input_signal and desired_signal are enforced real. Internally complex arithmetic is used due to FFT processing.
  • This is a block algorithm: one iteration produces L output samples.
CFDLMS( filter_order: int = 5, n_subbands: int = 64, decimation: Optional[int] = None, step: float = 0.1, gamma: float = 0.01, smoothing: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None)
138    def __init__(
139        self,
140        filter_order: int = 5,
141        n_subbands: int = 64,
142        decimation: Optional[int] = None,
143        step: float = 0.1,
144        gamma: float = 1e-2,
145        smoothing: float = 0.01,
146        w_init: Optional[Union[np.ndarray, list]] = None,
147    ) -> None:
148        if n_subbands <= 0:
149            raise ValueError("n_subbands (M) must be a positive integer.")
150        if filter_order < 0:
151            raise ValueError("filter_order (Nw) must be >= 0.")
152        if decimation is None:
153            decimation = n_subbands // 2
154        if decimation <= 0 or decimation > n_subbands:
155            raise ValueError("decimation (L) must satisfy 1 <= L <= M.")
156        if gamma <= 0:
157            raise ValueError("gamma must be > 0.")
158        if not (0.0 < smoothing <= 1.0):
159            raise ValueError("smoothing must be in (0, 1].")
160
161        self.M = int(n_subbands)
162        self.L = int(decimation)
163        self.Nw = int(filter_order)
164
165        self.step = float(step)
166        self.gamma = float(gamma)
167        self.smoothing = float(smoothing)
168
           # The base class tracks a flat real parameter vector of length M*(Nw+1).
169        n_params = self.M * (self.Nw + 1)
170        super().__init__(filter_order=n_params - 1, w_init=None)
171
172        self.ww: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
173        if w_init is not None:
174            w0 = np.asarray(w_init)
175            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
176                self.ww = w0.astype(np.complex128, copy=True)
177            else:
178                w0 = w0.reshape(-1)
179                if w0.size != n_params:
180                    raise ValueError(
181                        f"w_init has incompatible size. Expected {n_params} "
182                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
183                    )
184                self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True)
185
186        self.uu: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
187        self.sig: np.ndarray = np.zeros(self.M, dtype=np.float64)
188
           # Flattened view for the base API; the complex->float cast keeps the real part only.
189        self.w = self.ww.reshape(-1).astype(float, copy=False)
190        self.w_history = []
191        self._record_history()
192
193        self.ww_history: list[np.ndarray] = []
supports_complex: bool = False
M: int
L: int
Nw: int
step: float
gamma: float
smoothing: float
ww: numpy.ndarray
uu: numpy.ndarray
sig: numpy.ndarray
w
w_history
ww_history: list[numpy.ndarray]
def reset_filter(self, w_new: Union[numpy.ndarray, list, NoneType] = None) -> None:
195    def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None:
196        """
197        Reset coefficients/history.
198
199        If w_new is:
200          - None: zeros
201          - shape (M, Nw+1): used directly
202          - flat of length M*(Nw+1): reshaped
203        """
204        n_params = self.M * (self.Nw + 1)
205
206        if w_new is None:
207            self.ww = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
208        else:
209            w0 = np.asarray(w_new)
210            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
211                self.ww = w0.astype(np.complex128, copy=True)
212            else:
213                w0 = w0.reshape(-1)
214                if w0.size != n_params:
215                    raise ValueError(
216                        f"w_new has incompatible size. Expected {n_params} "
217                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
218                    )
219                self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True)
220
           # Clear the regressor delay line and the smoothed per-bin energy estimate.
221        self.uu = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
222        self.sig = np.zeros(self.M, dtype=np.float64)
223
224        self.ww_history = []
225        self.w = self.ww.reshape(-1).astype(float, copy=False)
226        self.w_history = []
227        self._record_history()

Reset coefficients/history.

If w_new is:

  • None: zeros
  • shape (M, Nw+1): used directly
  • flat of length M*(Nw+1): reshaped
@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
229    @ensure_real_signals
230    @validate_input
231    def optimize(
232        self,
233        input_signal: np.ndarray,
234        desired_signal: np.ndarray,
235        verbose: bool = False,
236        return_internal_states: bool = False,
237    ) -> OptimizationResult:
238        """
239        Run CFDLMS adaptation over real-valued (x[n], d[n]) in blocks.
240
241        Parameters
242        ----------
243        input_signal : array_like of float
244            Input sequence x[n], shape (N,).
245        desired_signal : array_like of float
246            Desired sequence d[n], shape (N,).
247        verbose : bool, default=False
248            If True, prints runtime and basic iteration stats.
249        return_internal_states : bool, default=False
250            If True, includes additional internal trajectories in result.extra.
251
252        Returns
253        -------
254        OptimizationResult
255            outputs : ndarray of float, shape (n_iters * L,)
256                Concatenated block outputs (L per iteration).
257            errors : ndarray of float, shape (n_iters * L,)
258                Output error sequence e[n] = d[n] - y[n].
259            coefficients : ndarray
260                Flattened coefficient history (from base class; real part of ww).
261            error_type : str
262                "output_error".
263            extra : dict
264                Always contains:
265                    - "ww_history": list of ndarray, each shape (M, Nw+1)
266                    - "n_iters": int
267                If return_internal_states=True, also contains:
268                    - "sig": ndarray, shape (M,) final smoothed per-bin energy
269                    - "sig_history": ndarray, shape (n_iters, M)
270        """
271        tic: float = time()
272
273        x = np.asarray(input_signal, dtype=np.float64).ravel()
274        d = np.asarray(desired_signal, dtype=np.float64).ravel()
275
276        M = self.M
277        L = self.L
278        Nw = self.Nw
279
280        max_iters_from_x = int(np.floor((x.size + L - M) / L) + 1) if (x.size + L) >= M else 0
281        max_iters_from_d = int(d.size // L)
282        n_iters = max(0, min(max_iters_from_x, max_iters_from_d))
283
284        out_len = n_iters * L
285        outputs = np.zeros(out_len, dtype=np.float64)
286        errors  = np.zeros(out_len, dtype=np.float64)
287
288        xpad = np.concatenate([np.zeros(L, dtype=np.float64), x])
289
290        self.ww_history = []
291
292        sig_hist: Optional[np.ndarray] = np.zeros((n_iters, M), dtype=np.float64) if return_internal_states else None
293
294        uu = self.uu
295        ww = self.ww
296        sig = self.sig
297
298        a = self.smoothing
299        u_step = self.step
300        gamma = self.gamma
301        sqrtM = np.sqrt(M)
302
303        for k in range(n_iters):
304            start = k * L
305            seg_x = xpad[start : start + M]
306
307            x_p = seg_x[::-1].astype(np.complex128, copy=False)
308
309            d_seg = d[start : start + L]
310            d_p = d_seg[::-1].astype(np.complex128, copy=False)
311
312            ui = np.fft.fft(x_p) / sqrtM
313
314            uu[:, 1:] = uu[:, :-1]
315            uu[:, 0] = ui
316
317            uy = np.sum(uu * ww, axis=1)
318
319            y_block = np.fft.ifft(uy) * sqrtM
320            y_firstL = y_block[:L]
321
322            e_rev = d_p - y_firstL
323
324            y_time = np.real(y_firstL[::-1])
325            e_time = d_seg - y_time
326
327            outputs[start : start + L] = y_time
328            errors[start : start + L] = e_time
329
330            e_pad = np.concatenate([e_rev, np.zeros(M - L, dtype=np.complex128)])
331            et = np.fft.fft(e_pad) / sqrtM
332            sig[:] = (1.0 - a) * sig + a * (np.abs(ui) ** 2)
333
334            denom = gamma + (Nw + 1) * sig
335            gain = u_step / denom
336
337            wwc = (gain[:, None] * np.conj(uu) * et[:, None]).astype(np.complex128, copy=False)
338
339            waux = np.fft.fft(wwc, axis=0) / sqrtM
340            waux[L:, :] = 0.0
341            wwc_c = np.fft.ifft(waux, axis=0) * sqrtM
342
343            ww = ww + wwc_c
344
345            self.ww_history.append(ww.copy())
346
347            self.w = np.real(ww.reshape(-1)).astype(float, copy=False)
348            self._record_history()
349
350            if return_internal_states and sig_hist is not None:
351                sig_hist[k, :] = sig
352
353        self.uu = uu
354        self.ww = ww
355        self.sig = sig
356
357        runtime_s: float = float(time() - tic)
358        if verbose:
359            print(f"[CFDLMS] Completed in {runtime_s * 1000:.03f} ms | iters={n_iters} | out_len={out_len}")
360
361        extra: Dict[str, Any] = {
362            "ww_history": self.ww_history,
363            "n_iters": int(n_iters),
364        }
365        if return_internal_states:
366            extra["sig"] = sig.copy()
367            extra["sig_history"] = sig_hist
368
369        return self._pack_results(
370            outputs=outputs,
371            errors=errors,
372            runtime_s=runtime_s,
373            error_type="output_error",
374            extra=extra,
375        )

Run CFDLMS adaptation over real-valued (x[n], d[n]) in blocks.

Parameters

input_signal : array_like of float
    Input sequence x[n], shape (N,).
desired_signal : array_like of float
    Desired sequence d[n], shape (N,).
verbose : bool, default=False
    If True, prints runtime and basic iteration stats.
return_internal_states : bool, default=False
    If True, includes additional internal trajectories in result.extra.

Returns

OptimizationResult
    outputs : ndarray of float, shape (n_iters * L,)
        Concatenated block outputs (L per iteration).
    errors : ndarray of float, shape (n_iters * L,)
        Output error sequence e[n] = d[n] - y[n].
    coefficients : ndarray
        Flattened coefficient history (from base class; real part of ww).
    error_type : str
        "output_error".
    extra : dict
        Always contains:
            - "ww_history": list of ndarray, each shape (M, Nw+1)
            - "n_iters": int
        If return_internal_states=True, also contains:
            - "sig": ndarray, shape (M,) final smoothed per-bin energy
            - "sig_history": ndarray, shape (n_iters, M)

class DLCLLMS(pydaptivefiltering.AdaptiveFilter):
 67class DLCLLMS(AdaptiveFilter):
 68    """
 69    Delayless Closed-Loop Subband LMS (DLCLLMS) for real-valued fullband signals.
 70
 71    Implements the Delayless Closed-Loop Subband LMS adaptive filtering algorithm
 72    (Algorithm 12.3, Diniz) using:
 73      - a DFT analysis bank (complex subband signals),
 74      - a polyphase Nyquist / fractional-delay prototype (Ed) to realize the delayless
 75        closed-loop structure,
 76      - and an equivalent fullband FIR mapping (GG) used to generate the output in the
 77        time domain.
 78
 79    High-level operation (as implemented)
 80    -------------------------------------
 81    Processing is block-based with block length:
 82        L = M   (M = number of subbands / DFT size)
 83
 84    For each block k:
 85      1) Form a reversed block x_p and pass each sample through a per-branch fractional-delay
 86         structure (polyphase) driven by `Ed`, producing x_frac (length M).
 87      2) Compute subband input:
 88            x_sb = F @ x_frac
 89         where F is the (non-unitary) DFT matrix (MATLAB dftmtx convention).
 90      3) Map current subband coefficients to an equivalent fullband FIR:
 91            GG = equivalent_fullband(w_sb)
 92         and filter the fullband input block through GG (with state) to produce y_block.
 93      4) Compute fullband error e_block = d_block - y_block.
 94      5) Pass the reversed error block through the same fractional-delay structure to get e_frac,
 95         then compute subband error:
 96            e_sb = F @ e_frac
 97      6) Update subband coefficients with an LMS-like recursion using a subband delay line x_cl
 98         and a smoothed power estimate sig[m]:
 99            sig[m] = (1-a) sig[m] + a |x_sb[m]|^2
100            mu_n  = step / (gamma + (Nw+1) * sig[m])
101            w_sb[m,:] <- w_sb[m,:] + 2 * mu_n * conj(e_sb[m]) * x_cl[m,:]
102
103    Coefficient representation and mapping
104    --------------------------------------
105    - Subband coefficients are stored in:
106          w_sb : complex ndarray, shape (M, Nw+1)
107
108    - For output synthesis and for the base API, an equivalent fullband FIR is built:
109          GG : real ndarray, length (M*Nw)
110
111      The mapping matches the provided MATLAB logic:
112        * Compute ww = real(F^H w_sb) / M
113        * For branch m=0: take ww[0, :Nw]
114        * For m>=1: convolve ww[m,:] with Ed[m-1,:] and extract a length-Nw segment
115          starting at (Dint+1), where Dint=(P-1)//2 and P is the polyphase length.
116
117    - The base-class coefficient vector `self.w` stores GG (float), and
118      `OptimizationResult.coefficients` contains the history of GG recorded **once per block**
119      (plus the initial entry).
120
121    Parameters
122    ----------
123    filter_order : int, default=5
124        Subband filter order Nw (number of taps per subband delay line is Nw+1).
125    n_subbands : int, default=4
126        Number of subbands M (DFT size). Also equals the processing block length L.
127    step : float, default=0.1
128        Global LMS step size.
129    gamma : float, default=1e-2
130        Regularization constant in the normalized step denominator (>0 recommended).
131    a : float, default=1e-2
132        Exponential smoothing factor for subband power sig in (0,1].
133    nyquist_len : int, default=2
134        Length Nfd of the Nyquist (fractional-delay) prototype used to build Ed.
135    w_init : array_like, optional
136        Initial subband coefficient matrix. Can be either:
137          - shape (M, Nw+1), or
138          - flat length M*(Nw+1), reshaped internally.
139
140    Notes
141    -----
142    - Real-valued interface (input_signal and desired_signal enforced real). Internal
143      computations use complex subband signals.
144    - This implementation processes only `n_used = floor(N/M)*M` samples. Any tail
145      samples (N - n_used) are left with output=0 and error=d in that region.
146    - The reported `error_type` is "output_error" (fullband output error sequence).
147    """
148    supports_complex: bool = False
149
150    def __init__(
151        self,
152        filter_order: int = 5,
153        n_subbands: int = 4,
154        step: float = 0.1,
155        gamma: float = 1e-2,
156        a: float = 1e-2,
157        nyquist_len: int = 2,
158        w_init: Optional[Union[np.ndarray, list]] = None,
159    ) -> None:
160        self.M: int = int(n_subbands)
161        if self.M <= 0:
162            raise ValueError("n_subbands must be a positive integer.")
163
164        self.Nw: int = int(filter_order)
165        if self.Nw <= 0:
166            raise ValueError("filter_order must be a positive integer.")
167
168        self.step: float = float(step)
169        self.gamma: float = float(gamma)
170        self.a: float = float(a)
171
172        self.nyquist_len: int = int(nyquist_len)
173        if self.nyquist_len <= 0:
174            raise ValueError("nyquist_len must be a positive integer.")
175
176        self._full_len: int = int(self.M * self.Nw)
177
178        super().__init__(filter_order=self._full_len - 1, w_init=None)
179
180        self.Ed: np.ndarray = _design_polyphase_nyquist_bank(self.M, self.nyquist_len)
181        self._P: int = int(self.Ed.shape[1])
182        self._Dint: int = int((self._P - 1) // 2)
183
184        self.F: np.ndarray = _dft_matrix(self.M)
185
186        self.w_sb: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex)
187        if w_init is not None:
188            w0 = np.asarray(w_init)
189            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
190                self.w_sb = w0.astype(complex, copy=True)
191            else:
192                w0 = w0.reshape(-1)
193                if w0.size != self.M * (self.Nw + 1):
194                    raise ValueError(
195                        f"w_init has incompatible size. Expected {self.M*(self.Nw+1)} "
196                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
197                    )
198                self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True)
199
200        self.x_cl: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex)
201
202        self.sig: np.ndarray = np.zeros((self.M,), dtype=float)
203
204        self._xx_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)
205        self._ee_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)
206
207        self._x_state: np.ndarray = np.zeros((max(self._full_len - 1, 0),), dtype=float)
208
209        self.w_history = []
210        self._record_history()
211
212    def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None:
213        """
214        Reset coefficients and history.
215
216        - If w_new is provided:
217            * If shape (M, Nw+1): interpreted as subband coefficients.
218            * If flat of length M*(Nw+1): reshaped as subband coefficients.
219        - Resets internal states (x_cl, sig, fractional-delay, FIR state).
220        """
221        if w_new is None:
222            self.w_sb = np.zeros((self.M, self.Nw + 1), dtype=complex)
223        else:
224            w0 = np.asarray(w_new)
225            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
226                self.w_sb = w0.astype(complex, copy=True)
227            else:
228                w0 = w0.reshape(-1)
229                if w0.size != self.M * (self.Nw + 1):
230                    raise ValueError(
231                        f"w_new has incompatible size. Expected {self.M*(self.Nw+1)} "
232                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
233                    )
234                self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True)
235
236        self.x_cl = np.zeros((self.M, self.Nw + 1), dtype=complex)
237        self.sig = np.zeros((self.M,), dtype=float)
238        self._xx_frac = np.zeros((self._P, self.M), dtype=float)
239        self._ee_frac = np.zeros((self._P, self.M), dtype=float)
240        self._x_state = np.zeros((max(self._full_len - 1, 0),), dtype=float)
241
242        GG = self._equivalent_fullband()
243        self.w = GG.astype(float, copy=True)
244        self.w_history = []
245        self._record_history()
246
247    def _equivalent_fullband(self) -> np.ndarray:
248        """
249        Build the equivalent fullband FIR GG (length M*Nw) from current subband coefficients,
250        matching the MATLAB mapping.
251
252        Returns
253        -------
254        GG : np.ndarray, shape (M*Nw,), dtype=float
255        """
256        ww = np.real(self.F.conj().T @ self.w_sb) / float(self.M)
257
258        G = np.zeros((self.M, self.Nw), dtype=float)
259        G[0, :] = ww[0, : self.Nw]
260
261        for m in range(1, self.M):
262            aux = np.convolve(self.Ed[m - 1, :], ww[m, :], mode="full")
263            start = self._Dint + 1
264            stop = start + self.Nw
265            G[m, :] = aux[start:stop]
266
267        GG = G.reshape(-1, order="F")
268        return GG
269
270    def _fir_block(self, b: np.ndarray, x_block: np.ndarray) -> np.ndarray:
271        """
272        FIR filtering with state, matching MATLAB `filter(b,1,x,zi)` block-by-block.
273        """
274        Lb = int(b.size)
275        if Lb == 0:
276            return np.zeros_like(x_block, dtype=float)
277        if Lb == 1:
278            return float(b[0]) * x_block
279
280        y = np.zeros_like(x_block, dtype=float)
281        state = self._x_state
282
283        for i, x_n in enumerate(x_block):
284            acc = float(b[0]) * float(x_n)
285            if Lb > 1 and state.size > 0:
286                acc += float(np.dot(b[1:], state[: Lb - 1]))
287            y[i] = acc
288
289            if state.size > 0:
290                state[1:] = state[:-1]
291                state[0] = float(x_n)
292
293        self._x_state = state
294        return y
295
296    @ensure_real_signals
297    @validate_input
298    def optimize(
299        self,
300        input_signal: np.ndarray,
301        desired_signal: np.ndarray,
302        verbose: bool = False,
303        return_internal_states: bool = False,
304    ) -> OptimizationResult:
305        """
306        Run DLCLLMS adaptation block-by-block.
307
308        Parameters
309        ----------
310        input_signal : array_like of float
311            Fullband input x[n], shape (N,).
312        desired_signal : array_like of float
313            Fullband desired d[n], shape (N,).
314        verbose : bool, default=False
315            If True, prints runtime and block stats.
316        return_internal_states : bool, default=False
317            If True, returns additional internal trajectories in result.extra.
318
319        Returns
320        -------
321        OptimizationResult
322            outputs : ndarray of float, shape (N,)
323                Estimated fullband output y[n]. Only the first `n_used` samples are
324                produced by block processing; remaining tail (if any) is zero.
325            errors : ndarray of float, shape (N,)
326                Fullband error e[n] = d[n] - y[n]. Tail (if any) equals d[n] there.
327            coefficients : ndarray
328                History of equivalent fullband FIR vectors GG (length M*Nw), stored
329                once per processed block (plus initial entry).
330            error_type : str
331                "output_error".
332
333            extra : dict
334                Always contains:
335                    - "n_blocks": number of processed blocks
336                    - "block_len": block length (equals M)
337                    - "n_used": number of processed samples (multiple of M)
338                If return_internal_states=True, also contains:
339                    - "sig_history": ndarray (n_blocks, M) of smoothed subband power
340                    - "w_sb_final": final subband coefficient matrix (M, Nw+1)
341        """
342        tic: float = time()
343
344        x = np.asarray(input_signal, dtype=float).ravel()
345        d = np.asarray(desired_signal, dtype=float).ravel()
346
347        n_samples: int = int(x.size)
348        M: int = int(self.M)
349        L: int = M
350
351        n_blocks: int = int(n_samples // L)
352        n_used: int = int(n_blocks * L)
353
354        outputs = np.zeros((n_samples,), dtype=float)
355        errors = np.zeros((n_samples,), dtype=float)
356
357        sig_hist: Optional[np.ndarray] = np.zeros((n_blocks, M), dtype=float) if return_internal_states else None
358
359        self.w_history = []
360        self._record_history()
361
362        if n_blocks == 0:
363            errors = d - outputs
364            runtime_s: float = float(time() - tic)
365            extra: Dict[str, Any] = {"n_blocks": 0, "block_len": L, "n_used": 0}
366            return self._pack_results(
367                outputs=outputs,
368                errors=errors,
369                runtime_s=runtime_s,
370                error_type="output_error",
371                extra=extra,
372            )
373
374        for k in range(n_blocks):
375            i0 = k * L
376            i1 = i0 + L
377
378            x_block = x[i0:i1]
379            d_block = d[i0:i1]
380
381            x_p = x_block[::-1]
382
383            x_frac = np.zeros((M,), dtype=float)
384            for m in range(M):
385                self._xx_frac[1:, m] = self._xx_frac[:-1, m]
386                self._xx_frac[0, m] = x_p[m]
387                x_frac[m] = float(np.dot(self.Ed[m, :], self._xx_frac[:, m]))
388
389            xsb = self.F @ x_frac.astype(complex)
390
391            GG = self._equivalent_fullband()
392            y_block = self._fir_block(GG, x_block)
393
394            outputs[i0:i1] = y_block
395            e_block = d_block - y_block
396            errors[i0:i1] = e_block
397
398            self.w = GG.astype(float, copy=True)
399            self._record_history()
400
401            e_p = e_block[::-1]
402            e_frac = np.zeros((M,), dtype=float)
403            for m in range(M):
404                self._ee_frac[1:, m] = self._ee_frac[:-1, m]
405                self._ee_frac[0, m] = e_p[m]
406                e_frac[m] = float(np.dot(self.Ed[m, :], self._ee_frac[:, m]))
407
408            esb = self.F @ e_frac.astype(complex)
409
410            for m in range(M):
411                self.x_cl[m, 1:] = self.x_cl[m, :-1]
412                self.x_cl[m, 0] = xsb[m]
413
414                self.sig[m] = (1.0 - self.a) * self.sig[m] + self.a * (np.abs(xsb[m]) ** 2)
415
416                mu_n = self.step / (self.gamma + (self.Nw + 1) * self.sig[m])
417
418                self.w_sb[m, :] = self.w_sb[m, :] + 2.0 * mu_n * np.conj(esb[m]) * self.x_cl[m, :]
419
420            if return_internal_states and sig_hist is not None:
421                sig_hist[k, :] = self.sig
422
423        if n_used < n_samples:
424            outputs[n_used:] = 0.0
425            errors[n_used:] = d[n_used:] - outputs[n_used:]
426
427        runtime_s: float = float(time() - tic)
428        if verbose:
429            print(f"[DLCLLMS] Completed in {runtime_s * 1000:.03f} ms | blocks={n_blocks} | used={n_used}/{n_samples}")
430
431        extra: Dict[str, Any] = {
432            "n_blocks": int(n_blocks),
433            "block_len": int(L),
434            "n_used": int(n_used),
435        }
436        if return_internal_states:
437            extra.update(
438                {
439                    "sig_history": sig_hist,
440                    "w_sb_final": self.w_sb.copy(),
441                }
442            )
443
444        return self._pack_results(
445            outputs=outputs,
446            errors=errors,
447            runtime_s=runtime_s,
448            error_type="output_error",
449            extra=extra,
450        )

Delayless Closed-Loop Subband LMS (DLCLLMS) for real-valued fullband signals.

Implements the Delayless Closed-Loop Subband LMS adaptive filtering algorithm (Algorithm 12.3, Diniz) using:

  • a DFT analysis bank (complex subband signals),
  • a polyphase Nyquist / fractional-delay prototype (Ed) to realize the delayless closed-loop structure,
  • and an equivalent fullband FIR mapping (GG) used to generate the output in the time domain.

High-level operation (as implemented)

Processing is block-based with block length: L = M (M = number of subbands / DFT size)

For each block k:

  1. Form a reversed block x_p and pass each sample through a per-branch
     fractional-delay structure (polyphase) driven by Ed, producing x_frac (length M).
  2. Compute subband input: x_sb = F @ x_frac, where F is the (non-unitary) DFT
     matrix (MATLAB dftmtx convention).
  3. Map current subband coefficients to an equivalent fullband FIR,
     GG = equivalent_fullband(w_sb), and filter the fullband input block through GG
     (with state) to produce y_block.
  4. Compute fullband error e_block = d_block - y_block.
  5. Pass the reversed error block through the same fractional-delay structure to get
     e_frac, then compute subband error: e_sb = F @ e_frac.
  6. Update subband coefficients with an LMS-like recursion using a subband delay
     line x_cl and a smoothed power estimate sig[m]:

         sig[m]    = (1-a) * sig[m] + a * |x_sb[m]|^2
         mu_n      = step / (gamma + (Nw+1) * sig[m])
         w_sb[m,:] <- w_sb[m,:] + 2 * mu_n * conj(e_sb[m]) * x_cl[m,:]

Coefficient representation and mapping

  • Subband coefficients are stored in: w_sb : complex ndarray, shape (M, Nw+1)

  • For output synthesis and for the base API, an equivalent fullband FIR is built: GG : real ndarray, length (M*Nw)

    The mapping matches the provided MATLAB logic:

    • Compute ww = real(F^H w_sb) / M
    • For branch m=0: take ww[0, :Nw]
    • For m>=1: convolve ww[m,:] with Ed[m-1,:] and extract a length-Nw segment starting at (Dint+1), where Dint=(P-1)//2 and P is the polyphase length.
  • The base-class coefficient vector self.w stores GG (float), and OptimizationResult.coefficients contains the history of GG recorded once per block (plus the initial entry).

Parameters

filter_order : int, default=5
    Subband filter order Nw (number of taps per subband delay line is Nw+1).
n_subbands : int, default=4
    Number of subbands M (DFT size). Also equals the processing block length L.
step : float, default=0.1
    Global LMS step size.
gamma : float, default=1e-2
    Regularization constant in the normalized step denominator (>0 recommended).
a : float, default=1e-2
    Exponential smoothing factor for subband power sig in (0,1].
nyquist_len : int, default=2
    Length Nfd of the Nyquist (fractional-delay) prototype used to build Ed.
w_init : array_like, optional
    Initial subband coefficient matrix. Can be either:
      - shape (M, Nw+1), or
      - flat length M*(Nw+1), reshaped internally.

Notes

  • Real-valued interface (input_signal and desired_signal enforced real). Internal computations use complex subband signals.
  • This implementation processes only n_used = floor(N/M)*M samples. Any tail samples (N - n_used) are left with output=0 and error=d in that region.
  • The reported error_type is "output_error" (fullband output error sequence).
DLCLLMS( filter_order: int = 5, n_subbands: int = 4, step: float = 0.1, gamma: float = 0.01, a: float = 0.01, nyquist_len: int = 2, w_init: Union[numpy.ndarray, list, NoneType] = None)
150    def __init__(
151        self,
152        filter_order: int = 5,
153        n_subbands: int = 4,
154        step: float = 0.1,
155        gamma: float = 1e-2,
156        a: float = 1e-2,
157        nyquist_len: int = 2,
158        w_init: Optional[Union[np.ndarray, list]] = None,
159    ) -> None:
160        self.M: int = int(n_subbands)
161        if self.M <= 0:
162            raise ValueError("n_subbands must be a positive integer.")
163
164        self.Nw: int = int(filter_order)
165        if self.Nw <= 0:
166            raise ValueError("filter_order must be a positive integer.")
167
168        self.step: float = float(step)
169        self.gamma: float = float(gamma)
170        self.a: float = float(a)
171
172        self.nyquist_len: int = int(nyquist_len)
173        if self.nyquist_len <= 0:
174            raise ValueError("nyquist_len must be a positive integer.")
175
176        self._full_len: int = int(self.M * self.Nw)
177
178        super().__init__(filter_order=self._full_len - 1, w_init=None)
179
180        self.Ed: np.ndarray = _design_polyphase_nyquist_bank(self.M, self.nyquist_len)
181        self._P: int = int(self.Ed.shape[1])
182        self._Dint: int = int((self._P - 1) // 2)
183
184        self.F: np.ndarray = _dft_matrix(self.M)
185
186        self.w_sb: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex)
187        if w_init is not None:
188            w0 = np.asarray(w_init)
189            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
190                self.w_sb = w0.astype(complex, copy=True)
191            else:
192                w0 = w0.reshape(-1)
193                if w0.size != self.M * (self.Nw + 1):
194                    raise ValueError(
195                        f"w_init has incompatible size. Expected {self.M*(self.Nw+1)} "
196                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
197                    )
198                self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True)
199
200        self.x_cl: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex)
201
202        self.sig: np.ndarray = np.zeros((self.M,), dtype=float)
203
204        self._xx_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)
205        self._ee_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)
206
207        self._x_state: np.ndarray = np.zeros((max(self._full_len - 1, 0),), dtype=float)
208
209        self.w_history = []
210        self._record_history()
supports_complex: bool = False
M: int
Nw: int
step: float
gamma: float
a: float
nyquist_len: int
Ed: numpy.ndarray
F: numpy.ndarray
w_sb: numpy.ndarray
x_cl: numpy.ndarray
sig: numpy.ndarray
w_history
def reset_filter(self, w_new: Union[numpy.ndarray, list, NoneType] = None) -> None:
212    def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None:
213        """
214        Reset coefficients and history.
215
216        - If w_new is provided:
217            * If shape (M, Nw+1): interpreted as subband coefficients.
218            * If flat of length M*(Nw+1): reshaped as subband coefficients.
219        - Resets internal states (x_cl, sig, fractional-delay, FIR state).
220        """
221        if w_new is None:
222            self.w_sb = np.zeros((self.M, self.Nw + 1), dtype=complex)
223        else:
224            w0 = np.asarray(w_new)
225            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
226                self.w_sb = w0.astype(complex, copy=True)
227            else:
228                w0 = w0.reshape(-1)
229                if w0.size != self.M * (self.Nw + 1):
230                    raise ValueError(
231                        f"w_new has incompatible size. Expected {self.M*(self.Nw+1)} "
232                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
233                    )
234                self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True)
235
236        self.x_cl = np.zeros((self.M, self.Nw + 1), dtype=complex)
237        self.sig = np.zeros((self.M,), dtype=float)
238        self._xx_frac = np.zeros((self._P, self.M), dtype=float)
239        self._ee_frac = np.zeros((self._P, self.M), dtype=float)
240        self._x_state = np.zeros((max(self._full_len - 1, 0),), dtype=float)
241
242        GG = self._equivalent_fullband()
243        self.w = GG.astype(float, copy=True)
244        self.w_history = []
245        self._record_history()

Reset coefficients and history.

  • If w_new is provided:
    • If shape (M, Nw+1): interpreted as subband coefficients.
    • If flat of length M*(Nw+1): reshaped as subband coefficients.
  • Resets internal states (x_cl, sig, fractional-delay, FIR state).
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run DLCLLMS adaptation block-by-block.

    Parameters
    ----------
    input_signal : array_like of float
        Fullband input x[n], shape (N,).
    desired_signal : array_like of float
        Fullband desired d[n], shape (N,).
    verbose : bool, default=False
        If True, prints runtime and block stats.
    return_internal_states : bool, default=False
        If True, returns additional internal trajectories in result.extra.

    Returns
    -------
    OptimizationResult
        outputs : ndarray of float, shape (N,)
            Estimated fullband output y[n]; only the first n_used samples
            come from block processing, any tail remains zero.
        errors : ndarray of float, shape (N,)
            Fullband error e[n] = d[n] - y[n]; on the tail it equals d[n].
        coefficients : ndarray
            History of the equivalent fullband FIR, one snapshot per
            processed block plus the initial entry.
        error_type : str
            "output_error".
        extra : dict
            Always: "n_blocks", "block_len" (== M), "n_used".
            With return_internal_states=True also: "sig_history"
            (n_blocks, M) and "w_sb_final" (M, Nw+1).
    """
    tic = time()

    x = np.asarray(input_signal, dtype=float).ravel()
    d = np.asarray(desired_signal, dtype=float).ravel()
    n_samples = int(x.size)

    # Blocks have length M (one sample per subband channel).
    block_len = int(self.M)
    n_blocks = int(n_samples // block_len)
    n_used = int(n_blocks * block_len)

    outputs = np.zeros((n_samples,), dtype=float)
    errors = np.zeros((n_samples,), dtype=float)

    sig_hist = np.zeros((n_blocks, block_len), dtype=float) if return_internal_states else None

    self.w_history = []
    self._record_history()

    if n_blocks == 0:
        # Signal shorter than one block: output stays zero, error equals d.
        return self._pack_results(
            outputs=outputs,
            errors=d - outputs,
            runtime_s=float(time() - tic),
            error_type="output_error",
            extra={"n_blocks": 0, "block_len": block_len, "n_used": 0},
        )

    for blk in range(n_blocks):
        start = blk * block_len
        stop = start + block_len
        x_block = x[start:stop]
        d_block = d[start:stop]

        # Analysis of the input: reversed block fed through per-subband
        # fractional-delay filters (Ed rows), then the transform F.
        x_rev = x_block[::-1]
        x_frac = np.empty((block_len,), dtype=float)
        for m in range(block_len):
            self._xx_frac[1:, m] = self._xx_frac[:-1, m]
            self._xx_frac[0, m] = x_rev[m]
            x_frac[m] = float(np.dot(self.Ed[m, :], self._xx_frac[:, m]))
        xsb = self.F @ x_frac.astype(complex)

        # Fullband filtering with the current equivalent FIR.
        eq_fir = self._equivalent_fullband()
        y_block = self._fir_block(eq_fir, x_block)
        outputs[start:stop] = y_block
        e_block = d_block - y_block
        errors[start:stop] = e_block

        self.w = eq_fir.astype(float, copy=True)
        self._record_history()

        # Analysis of the error, mirroring the input path.
        e_rev = e_block[::-1]
        e_frac = np.empty((block_len,), dtype=float)
        for m in range(block_len):
            self._ee_frac[1:, m] = self._ee_frac[:-1, m]
            self._ee_frac[0, m] = e_rev[m]
            e_frac[m] = float(np.dot(self.Ed[m, :], self._ee_frac[:, m]))
        esb = self.F @ e_frac.astype(complex)

        # Per-subband normalized-LMS update of the complex coefficients.
        for m in range(block_len):
            self.x_cl[m, 1:] = self.x_cl[m, :-1]
            self.x_cl[m, 0] = xsb[m]

            self.sig[m] = (1.0 - self.a) * self.sig[m] + self.a * (np.abs(xsb[m]) ** 2)
            mu_n = self.step / (self.gamma + (self.Nw + 1) * self.sig[m])
            self.w_sb[m, :] = self.w_sb[m, :] + 2.0 * mu_n * np.conj(esb[m]) * self.x_cl[m, :]

        if sig_hist is not None:
            sig_hist[blk, :] = self.sig

    if n_used < n_samples:
        # Unprocessed tail: zero output, error falls back to d.
        outputs[n_used:] = 0.0
        errors[n_used:] = d[n_used:] - outputs[n_used:]

    runtime_s = float(time() - tic)
    if verbose:
        print(f"[DLCLLMS] Completed in {runtime_s * 1000:.03f} ms | blocks={n_blocks} | used={n_used}/{n_samples}")

    extra: Dict[str, Any] = {
        "n_blocks": int(n_blocks),
        "block_len": int(block_len),
        "n_used": int(n_used),
    }
    if return_internal_states:
        extra["sig_history"] = sig_hist
        extra["w_sb_final"] = self.w_sb.copy()

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="output_error",
        extra=extra,
    )

Run DLCLLMS adaptation block-by-block.

Parameters

input_signal : array_like of float
    Fullband input x[n], shape (N,).
desired_signal : array_like of float
    Fullband desired d[n], shape (N,).
verbose : bool, default=False
    If True, prints runtime and block stats.
return_internal_states : bool, default=False
    If True, returns additional internal trajectories in result.extra.

Returns

OptimizationResult outputs : ndarray of float, shape (N,) Estimated fullband output y[n]. Only the first n_used samples are produced by block processing; remaining tail (if any) is zero. errors : ndarray of float, shape (N,) Fullband error e[n] = d[n] - y[n]. Tail (if any) equals d[n] there. coefficients : ndarray History of equivalent fullband FIR vectors GG (length M*Nw), stored once per processed block (plus initial entry). error_type : str "output_error".

extra : dict
    Always contains:
        - "n_blocks": number of processed blocks
        - "block_len": block length (equals M)
        - "n_used": number of processed samples (multiple of M)
    If return_internal_states=True, also contains:
        - "sig_history": ndarray (n_blocks, M) of smoothed subband power
        - "w_sb_final": final subband coefficient matrix (M, Nw+1)
class OLSBLMS(AdaptiveFilter):
    """
    Open-Loop Subband LMS (OLSBLMS) for real-valued fullband signals.

    Implements Algorithm 12.1 (Diniz): the fullband input and desired
    signals are each passed through an M-channel analysis bank and
    decimated by L; every subband then runs an independent normalized-LMS
    update on its own (Nw+1)-tap FIR filter. After adaptation, a fullband
    output is reconstructed through the synthesis bank (zero-stuffing
    upsampling, filtering, summing) and the returned error is
    e[n] = d[n] - y[n].

    Parameters
    ----------
    n_subbands : int
        Number of subbands M.
    analysis_filters : array_like
        Analysis bank hk, shape (M, Lh).
    synthesis_filters : array_like
        Synthesis bank fk, shape (M, Lf).
    filter_order : int
        Subband FIR order Nw (Nw+1 taps per subband).
    step : float, default=0.1
        Global LMS step-size factor.
    gamma : float, default=1e-2
        Regularization in the normalized step denominator (>0 recommended).
    a : float, default=0.01
        Exponential smoothing factor for subband energy estimates, in (0,1].
    decimation_factor : int, optional
        Decimation factor L; defaults to M when omitted.
    w_init : array_like, optional
        Initial subband coefficients: shape (M, Nw+1) or a flat vector of
        length M*(Nw+1) (reshaped row-major).

    Notes
    -----
    - Real-valued interface (input and desired signals enforced real).
    - Open-loop structure: subband regressors come from the
      analysis-filtered fullband input, independent of any reconstructed
      fullband output loop.
    - For base-class compatibility, ``self.w`` is a flattened (row-major)
      view of ``w_mat``; full (M, Nw+1) snapshots are kept in
      ``extra["w_matrix_history"]``. Subband MSE curves are exposed as
      ``mse_subbands = e_sb**2`` and ``mse_overall`` (mean over subbands).
    """
    supports_complex: bool = False

    M: int
    Nw: int
    L: int
    step: float
    gamma: float
    a: float

    def __init__(
        self,
        n_subbands: int,
        analysis_filters: ArrayLike,
        synthesis_filters: ArrayLike,
        filter_order: int,
        step: float = 0.1,
        gamma: float = 1e-2,
        a: float = 0.01,
        decimation_factor: Optional[int] = None,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        self.M = int(n_subbands)
        if self.M <= 0:
            raise ValueError("n_subbands must be a positive integer.")

        self.Nw = int(filter_order)
        if self.Nw <= 0:
            raise ValueError("filter_order must be a positive integer.")

        self.step = float(step)
        self.gamma = float(gamma)
        self.a = float(a)

        analysis = np.asarray(analysis_filters, dtype=float)
        synthesis = np.asarray(synthesis_filters, dtype=float)

        if analysis.ndim != 2 or synthesis.ndim != 2:
            raise ValueError("analysis_filters and synthesis_filters must be 2D arrays with shape (M, Lh/Lf).")
        if analysis.shape[0] != self.M or synthesis.shape[0] != self.M:
            raise ValueError(
                f"Filterbanks must have M rows. Got hk.shape[0]={analysis.shape[0]}, fk.shape[0]={synthesis.shape[0]}, M={self.M}."
            )

        self.hk = analysis
        self.fk = synthesis

        self.L = self.M if decimation_factor is None else int(decimation_factor)
        if self.L <= 0:
            raise ValueError("decimation_factor L must be a positive integer.")

        # Base class sees one flat parameter vector of length M*(Nw+1).
        self._n_params = int(self.M * (self.Nw + 1))
        super().__init__(filter_order=self._n_params - 1, w_init=None)

        self.w_mat: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=float)
        if w_init is not None:
            w0 = np.asarray(w_init, dtype=float)
            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
                self.w_mat = w0.copy()
            elif w0.ndim == 1 and w0.size == self._n_params:
                self.w_mat = w0.reshape(self.M, self.Nw + 1).copy()
            else:
                raise ValueError(
                    "w_init must have shape (M, Nw+1) or be a flat vector of length M*(Nw+1). "
                    f"Got w_init.shape={w0.shape}."
                )

        self._sync_base_w()
        self.w_history = []
        self._record_history()

        self.w_matrix_history: list[np.ndarray] = []

    def _sync_base_w(self) -> None:
        """Mirror the (M, Nw+1) subband matrix into the flat base vector."""
        self.w = self.w_mat.reshape(-1).astype(float, copy=False)

    @classmethod
    def default_test_init_kwargs(cls, order: int) -> dict:
        """Single-band identity filterbank setup for standardized tests."""
        return dict(
            n_subbands=1,
            analysis_filters=np.array([[1.0]], dtype=float),
            synthesis_filters=np.array([[1.0]], dtype=float),
            filter_order=order,
            step=0.1,
            gamma=1e-2,
            a=0.01,
            decimation_factor=1,
        )

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run OLSBLMS adaptation and reconstruct the fullband output.

        Parameters
        ----------
        input_signal : array_like of float
            Fullband input x[n], shape (N,).
        desired_signal : array_like of float
            Fullband desired d[n], shape (N,).
        verbose : bool, default=False
            If True, prints runtime and iteration count.
        return_internal_states : bool, default=False
            If True, adds the final subband energies to ``result.extra``.

        Returns
        -------
        OptimizationResult
            outputs : ndarray of float, shape (N,)
                Fullband reconstructed output y[n] (synthesis of the
                subband outputs after adaptation).
            errors : ndarray of float, shape (N,)
                Fullband output error e[n] = d[n] - y[n].
            coefficients : ndarray
                Flattened w_mat history, one snapshot per decimated-time
                iteration plus the initial entry.
            error_type : str
                "output_error".
            extra : dict
                Always: "w_matrix_history", "subband_outputs",
                "subband_errors", "mse_subbands", "mse_overall".
                With return_internal_states=True also: "sig_ol" (M,).
        """
        tic = time()

        x = np.asarray(input_signal, dtype=float).ravel()
        d = np.asarray(desired_signal, dtype=float).ravel()
        n_samples = int(x.size)

        # Stage A: analysis filtering + decimation of both x and d.
        xsb_list: list[np.ndarray] = []
        dsb_list: list[np.ndarray] = []
        for m in range(self.M):
            xsb_list.append(_decimate_by_L(_fir_filter_causal(self.hk[m, :], x), self.L))
            dsb_list.append(_decimate_by_L(_fir_filter_causal(self.hk[m, :], d), self.L))

        all_subbands = xsb_list + dsb_list
        N_iter = min(arr.size for arr in all_subbands) if all_subbands else 0
        if N_iter == 0:
            # Nothing to adapt on: zero output, error equals desired.
            y0 = np.zeros_like(d)
            return self._pack_results(
                outputs=y0,
                errors=d - y0,
                runtime_s=float(time() - tic),
                error_type="output_error",
                extra={
                    "w_matrix_history": [],
                    "subband_outputs": np.zeros((self.M, 0), dtype=float),
                    "subband_errors": np.zeros((self.M, 0), dtype=float),
                    "mse_subbands": np.zeros((self.M, 0), dtype=float),
                    "mse_overall": np.zeros((0,), dtype=float),
                },
            )

        # Truncate every subband to the shortest decimated sequence.
        xsb = np.vstack([arr[:N_iter] for arr in xsb_list])
        dsb = np.vstack([arr[:N_iter] for arr in dsb_list])

        y_sb = np.zeros((self.M, N_iter), dtype=float)
        e_sb = np.zeros((self.M, N_iter), dtype=float)
        regressor = np.zeros((self.M, self.Nw + 1), dtype=float)
        sig_ol = np.zeros((self.M,), dtype=float)

        self.w_history = []
        self._record_history()
        self.w_matrix_history = []

        # Stage B: per-subband normalized LMS in decimated time.
        for k in range(N_iter):
            for m in range(self.M):
                regressor[m, 1:] = regressor[m, :-1]
                regressor[m, 0] = xsb[m, k]

                y_sb[m, k] = float(np.dot(self.w_mat[m, :], regressor[m, :]))
                e_sb[m, k] = float(dsb[m, k] - y_sb[m, k])

                sig_ol[m] = (1.0 - self.a) * sig_ol[m] + self.a * (xsb[m, k] ** 2)
                mu_m = (2.0 * self.step) / (self.gamma + (self.Nw + 1) * sig_ol[m])
                self.w_mat[m, :] = self.w_mat[m, :] + mu_m * e_sb[m, k] * regressor[m, :]

            self.w_matrix_history.append(self.w_mat.copy())
            self._sync_base_w()
            self._record_history()

        # Synthesis: upsample each subband output, filter, and sum.
        y_full = np.zeros((n_samples,), dtype=float)
        for m in range(self.M):
            upsampled = _upsample_by_L(y_sb[m, :], self.L, n_samples)
            y_full += _fir_filter_causal(self.fk[m, :], upsampled)

        e_full = d - y_full
        mse_subbands = e_sb ** 2
        mse_overall = np.mean(mse_subbands, axis=0)

        runtime_s = float(time() - tic)
        if verbose:
            print(f"[OLSBLMS] Completed in {runtime_s * 1000:.03f} ms | iters={N_iter}")

        extra: Dict[str, Any] = {
            "w_matrix_history": self.w_matrix_history,
            "subband_outputs": y_sb,
            "subband_errors": e_sb,
            "mse_subbands": mse_subbands,
            "mse_overall": mse_overall,
        }
        if return_internal_states:
            extra["sig_ol"] = sig_ol.copy()

        return self._pack_results(
            outputs=y_full,
            errors=e_full,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )

Open-Loop Subband LMS (OLSBLMS) for real-valued fullband signals.

Implements the Open-Loop Subband LMS adaptive filtering algorithm (Algorithm 12.1, Diniz) using an analysis/synthesis filterbank with subband-adaptive FIR filters.

High-level operation (as implemented)

Given fullband input x[n] and desired d[n], and an M-channel analysis bank h_k[m], the algorithm proceeds in two stages:

(A) Analysis + Decimation (open-loop) For each subband m = 0..M-1: - Filter the fullband input and desired with the analysis filter: x_aux[m] = filter(hk[m], 1, x) d_aux[m] = filter(hk[m], 1, d) - Decimate by L (keep samples 0, L, 2L, ...): x_sb[m] = x_aux[m][::L] d_sb[m] = d_aux[m][::L]

The adaptation length is:
    N_iter = min over m of min(len(x_sb[m]), len(d_sb[m]))
(i.e., all subbands are truncated to the shortest decimated sequence).

(B) Subband LMS adaptation (per-sample in decimated time) Each subband has its own tapped-delay line x_ol[m,:] of length (Nw+1) and its own coefficient vector w_mat[m,:] (also length Nw+1).

For each decimated-time index k = 0..N_iter-1, and for each subband m:
  - Update subband delay line:
        x_ol[m,0] = x_sb[m,k]
  - Compute subband output and error:
        y_sb[m,k] = w_mat[m]^T x_ol[m]
        e_sb[m,k] = d_sb[m,k] - y_sb[m,k]
  - Update a smoothed subband energy estimate:
        sig_ol[m] = (1-a) sig_ol[m] + a * x_sb[m,k]^2
  - Normalized LMS-like step:
        mu_m = (2*step) / (gamma + (Nw+1)*sig_ol[m])
  - Coefficient update:
        w_mat[m] <- w_mat[m] + mu_m * e_sb[m,k] * x_ol[m]

Fullband reconstruction (convenience synthesis)

After adaptation, a fullband output is reconstructed via the synthesis bank f_k[m]:

  • Upsample each subband output by L (zero-stuffing), then filter: y_up[m] = upsample(y_sb[m], L) y_full[m] = filter(fk[m], 1, y_up[m])
  • Sum across subbands: y[n] = sum_m y_full[m][n] The returned error is the fullband output error e[n] = d[n] - y[n].

Coefficient representation and history

  • The adaptive parameters are stored as: w_mat : ndarray, shape (M, Nw+1), dtype=float
  • For compatibility with the base class, self.w is a flattened view of w_mat (row-major), and OptimizationResult.coefficients contains the stacked history of this flattened vector (recorded once per decimated-time iteration, plus the initial entry).
  • The full (M, Nw+1) snapshots are also stored in extra["w_matrix_history"].

Parameters

n_subbands : int Number of subbands (M). analysis_filters : array_like Analysis bank hk with shape (M, Lh). synthesis_filters : array_like Synthesis bank fk with shape (M, Lf). filter_order : int Subband FIR order Nw (number of taps per subband is Nw+1). step : float, default=0.1 Global LMS step-size factor. gamma : float, default=1e-2 Regularization term in the normalized denominator (>0 recommended). a : float, default=0.01 Exponential smoothing factor for subband energy estimates in (0,1]. decimation_factor : int, optional Decimation factor L. If None, uses L=M. w_init : array_like, optional Initial subband coefficients. Can be: - shape (M, Nw+1), or - flat of length M*(Nw+1), reshaped row-major.

Notes

  • Real-valued interface (input_signal and desired_signal enforced real).
  • This is an open-loop structure: subband regressors are formed from the analysis-filtered fullband input, independent of any reconstructed fullband output loop.
  • Subband MSE curves are provided as mse_subbands = e_sb**2 and mse_overall = mean_m mse_subbands[m,k].
OLSBLMS( n_subbands: int, analysis_filters: Union[numpy.ndarray, list], synthesis_filters: Union[numpy.ndarray, list], filter_order: int, step: float = 0.1, gamma: float = 0.01, a: float = 0.01, decimation_factor: Optional[int] = None, w_init: Union[numpy.ndarray, list, NoneType] = None)
155    def __init__(
156        self,
157        n_subbands: int,
158        analysis_filters: ArrayLike,
159        synthesis_filters: ArrayLike,
160        filter_order: int,
161        step: float = 0.1,
162        gamma: float = 1e-2,
163        a: float = 0.01,
164        decimation_factor: Optional[int] = None,
165        w_init: Optional[Union[np.ndarray, list]] = None,
166    ) -> None:
167        self.M = int(n_subbands)
168        if self.M <= 0:
169            raise ValueError("n_subbands must be a positive integer.")
170
171        self.Nw = int(filter_order)
172        if self.Nw <= 0:
173            raise ValueError("filter_order must be a positive integer.")
174
175        self.step = float(step)
176        self.gamma = float(gamma)
177        self.a = float(a)
178
179        hk = np.asarray(analysis_filters, dtype=float)
180        fk = np.asarray(synthesis_filters, dtype=float)
181
182        if hk.ndim != 2 or fk.ndim != 2:
183            raise ValueError("analysis_filters and synthesis_filters must be 2D arrays with shape (M, Lh/Lf).")
184        if hk.shape[0] != self.M or fk.shape[0] != self.M:
185            raise ValueError(
186                f"Filterbanks must have M rows. Got hk.shape[0]={hk.shape[0]}, fk.shape[0]={fk.shape[0]}, M={self.M}."
187            )
188
189        self.hk = hk
190        self.fk = fk
191
192        self.L = int(decimation_factor) if decimation_factor is not None else self.M
193        if self.L <= 0:
194            raise ValueError("decimation_factor L must be a positive integer.")
195
196        self._n_params = int(self.M * (self.Nw + 1))
197        super().__init__(filter_order=self._n_params - 1, w_init=None)
198
199        self.w_mat: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=float)
200        if w_init is not None:
201            w0 = np.asarray(w_init, dtype=float)
202            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
203                self.w_mat = w0.copy()
204            elif w0.ndim == 1 and w0.size == self._n_params:
205                self.w_mat = w0.reshape(self.M, self.Nw + 1).copy()
206            else:
207                raise ValueError(
208                    "w_init must have shape (M, Nw+1) or be a flat vector of length M*(Nw+1). "
209                    f"Got w_init.shape={w0.shape}."
210                )
211
212        self.w = self.w_mat.reshape(-1).astype(float, copy=False)
213        self.w_history = []
214        self._record_history()
215
216        self.w_matrix_history: list[np.ndarray] = []
supports_complex: bool = False
M: int
Nw: int
L: int
step: float
gamma: float
a: float
hk
fk
w_mat: numpy.ndarray
w
w_history
w_matrix_history: list[numpy.ndarray]
@classmethod
def default_test_init_kwargs(cls, order: int) -> dict:
222    @classmethod
223    def default_test_init_kwargs(cls, order: int) -> dict:
224        M = 1
225        hk = np.array([[1.0]], dtype=float)
226        fk = np.array([[1.0]], dtype=float)
227        return dict(
228            n_subbands=M,
229            analysis_filters=hk,
230            synthesis_filters=fk,
231            filter_order=order,
232            step=0.1,
233            gamma=1e-2,
234            a=0.01,
235            decimation_factor=1,
236        )

Override in subclasses to provide init kwargs for standardized tests.

@ensure_real_signals
@validate_input
def optimize( self, input_signal: numpy.ndarray, desired_signal: numpy.ndarray, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
238    @ensure_real_signals
239    @validate_input
240    def optimize(
241        self,
242        input_signal: np.ndarray,
243        desired_signal: np.ndarray,
244        verbose: bool = False,
245        return_internal_states: bool = False,
246    ) -> OptimizationResult:
247        """
248        Run OLSBLMS adaptation.
249
250        Parameters
251        ----------
252        input_signal : array_like of float
253            Fullband input x[n], shape (N,).
254        desired_signal : array_like of float
255            Fullband desired d[n], shape (N,).
256        verbose : bool, default=False
257            If True, prints runtime and iteration count.
258        return_internal_states : bool, default=False
259            If True, returns additional internal states in result.extra.
260
261        Returns
262        -------
263        OptimizationResult
264            outputs : ndarray of float, shape (N,)
265                Fullband reconstructed output y[n] obtained by synthesis of the
266                subband outputs after adaptation.
267            errors : ndarray of float, shape (N,)
268                Fullband output error e[n] = d[n] - y[n].
269            coefficients : ndarray
270                Flattened coefficient history of w_mat, shape
271                (#snapshots, M*(Nw+1)), where snapshots are recorded once per
272                subband-iteration (decimated-time step), plus the initial entry.
273            error_type : str
274                "output_error".
275
276            extra : dict
277                Always contains:
278                  - "w_matrix_history": list of (M, Nw+1) coefficient snapshots
279                  - "subband_outputs": ndarray (M, N_iter)
280                  - "subband_errors": ndarray (M, N_iter)
281                  - "mse_subbands": ndarray (M, N_iter) with e_sb**2
282                  - "mse_overall": ndarray (N_iter,) mean subband MSE per iteration
283                If return_internal_states=True, also contains:
284                  - "sig_ol": final subband energy estimates, shape (M,)
285        """
286        tic: float = time()
287
288        x = np.asarray(input_signal, dtype=float).ravel()
289        d = np.asarray(desired_signal, dtype=float).ravel()
290
291        n_samples: int = int(x.size)
292
293        xsb_list: list[np.ndarray] = []
294        dsb_list: list[np.ndarray] = []
295        for m in range(self.M):
296            xaux_x = _fir_filter_causal(self.hk[m, :], x)
297            xaux_d = _fir_filter_causal(self.hk[m, :], d)
298            xsb_list.append(_decimate_by_L(xaux_x, self.L))
299            dsb_list.append(_decimate_by_L(xaux_d, self.L))
300
301        N_iter: int = min(arr.size for arr in (xsb_list + dsb_list)) if (xsb_list and dsb_list) else 0
302        if N_iter == 0:
303            y0 = np.zeros_like(d)
304            runtime_s = float(time() - tic)
305            return self._pack_results(
306                outputs=y0,
307                errors=d - y0,
308                runtime_s=runtime_s,
309                error_type="output_error",
310                extra={
311                    "w_matrix_history": [],
312                    "subband_outputs": np.zeros((self.M, 0), dtype=float),
313                    "subband_errors": np.zeros((self.M, 0), dtype=float),
314                    "mse_subbands": np.zeros((self.M, 0), dtype=float),
315                    "mse_overall": np.zeros((0,), dtype=float),
316                },
317            )
318
319        xsb = np.vstack([arr[:N_iter] for arr in xsb_list])
320        dsb = np.vstack([arr[:N_iter] for arr in dsb_list])
321
322        y_sb = np.zeros((self.M, N_iter), dtype=float)
323        e_sb = np.zeros((self.M, N_iter), dtype=float)
324
325        x_ol = np.zeros((self.M, self.Nw + 1), dtype=float)
326        sig_ol = np.zeros((self.M,), dtype=float)
327
328        self.w_history = []
329        self._record_history()
330        self.w_matrix_history = []
331
332        for k in range(N_iter):
333            for m in range(self.M):
334                x_ol[m, 1:] = x_ol[m, :-1]
335                x_ol[m, 0] = xsb[m, k]
336
337                y_sb[m, k] = float(np.dot(self.w_mat[m, :], x_ol[m, :]))
338                e_sb[m, k] = float(dsb[m, k] - y_sb[m, k])
339
340                sig_ol[m] = (1.0 - self.a) * sig_ol[m] + self.a * (xsb[m, k] ** 2)
341
342                mu_m = (2.0 * self.step) / (self.gamma + (self.Nw + 1) * sig_ol[m])
343
344                self.w_mat[m, :] = self.w_mat[m, :] + mu_m * e_sb[m, k] * x_ol[m, :]
345
346            self.w_matrix_history.append(self.w_mat.copy())
347            self._sync_base_w()
348            self._record_history()
349
350        y_full = np.zeros((n_samples,), dtype=float)
351        for m in range(self.M):
352            y_up = _upsample_by_L(y_sb[m, :], self.L, n_samples)
353            y_full += _fir_filter_causal(self.fk[m, :], y_up)
354
355        e_full = d - y_full
356
357        mse_subbands = e_sb ** 2
358        mse_overall = np.mean(mse_subbands, axis=0)
359
360        runtime_s: float = float(time() - tic)
361        if verbose:
362            print(f"[OLSBLMS] Completed in {runtime_s * 1000:.03f} ms | iters={N_iter}")
363
364        extra: Dict[str, Any] = {
365            "w_matrix_history": self.w_matrix_history,
366            "subband_outputs": y_sb,
367            "subband_errors": e_sb,
368            "mse_subbands": mse_subbands,
369            "mse_overall": mse_overall,
370        }
371        if return_internal_states:
372            extra["sig_ol"] = sig_ol.copy()
373
374        return self._pack_results(
375            outputs=y_full,
376            errors=e_full,
377            runtime_s=runtime_s,
378            error_type="output_error",
379            extra=extra,
380        )

Run OLSBLMS adaptation.

Parameters

input_signal : array_like of float
    Fullband input x[n], shape (N,).
desired_signal : array_like of float
    Fullband desired d[n], shape (N,).
verbose : bool, default=False
    If True, prints runtime and iteration count.
return_internal_states : bool, default=False
    If True, returns additional internal states in result.extra.

Returns

OptimizationResult outputs : ndarray of float, shape (N,) Fullband reconstructed output y[n] obtained by synthesis of the subband outputs after adaptation. errors : ndarray of float, shape (N,) Fullband output error e[n] = d[n] - y[n]. coefficients : ndarray Flattened coefficient history of w_mat, shape (#snapshots, M*(Nw+1)), where snapshots are recorded once per subband-iteration (decimated-time step), plus the initial entry. error_type : str "output_error".

extra : dict
    Always contains:
      - "w_matrix_history": list of (M, Nw+1) coefficient snapshots
      - "subband_outputs": ndarray (M, N_iter)
      - "subband_errors": ndarray (M, N_iter)
      - "mse_subbands": ndarray (M, N_iter) with e_sb**2
      - "mse_overall": ndarray (N_iter,) mean subband MSE per iteration
    If return_internal_states=True, also contains:
      - "sig_ol": final subband energy estimates, shape (M,)
class AffineProjectionCM(pydaptivefiltering.AdaptiveFilter):
 26class AffineProjectionCM(AdaptiveFilter):
 27    """
 28    Complex Affine-Projection Constant-Modulus (AP-CM) adaptive filter.
 29
 30    Blind affine-projection algorithm based on the constant-modulus criterion,
 31    following Diniz (Alg. 13.4). This implementation uses a *unit-modulus*
 32    reference (i.e., target magnitude equal to 1) obtained by normalizing the
 33    affine-projection output vector.
 34
 35    Parameters
 36    ----------
 37    filter_order : int, optional
 38        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
 39        Default is 5.
 40    step_size : float, optional
 41        Adaptation step size ``mu``. Default is 0.1.
 42    memory_length : int, optional
 43        Reuse factor ``L`` (number of past regressors reused). The affine-
 44        projection block size is therefore ``P = L + 1`` columns. Default is 2.
 45    gamma : float, optional
 46        Levenberg-Marquardt regularization factor ``gamma`` used in the
 47        ``(L + 1) x (L + 1)`` normal-equation system for numerical stability.
 48        Default is 1e-6.
 49    w_init : array_like of complex, optional
 50        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 51        initializes with zeros.
 52
 53    Notes
 54    -----
 55    At iteration ``k``, form the regressor block matrix:
 56
 57    - ``X(k) ∈ C^{(M+1) x (L+1)}``, whose columns are the most recent regressor
 58    vectors (newest in column 0).
 59
 60    The affine-projection output vector is:
 61
 62    .. math::
 63        y_{ap}(k) = X^H(k) w(k)  \\in \\mathbb{C}^{L+1}.
 64
 65    This implementation uses a *unit-circle projection* (normalization) as the
 66    constant-modulus "reference":
 67
 68    .. math::
 69        d_{ap}(k) = \\frac{y_{ap}(k)}{|y_{ap}(k)|},
 70
 71    applied element-wise, with a small threshold to avoid division by zero.
 72
 73    The error vector is:
 74
 75    .. math::
 76        e_{ap}(k) = d_{ap}(k) - y_{ap}(k).
 77
 78    The update direction ``g(k)`` is obtained by solving the regularized system:
 79
 80    .. math::
 81        (X^H(k) X(k) + \\gamma I_{L+1})\\, g(k) = e_{ap}(k),
 82
 83    and the coefficient update is:
 84
 85    .. math::
 86        w(k+1) = w(k) + \\mu X(k) g(k).
 87
 88    References
 89    ----------
 90    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 91    Implementation*, 5th ed., Algorithm 13.4.
 92    """
 93    supports_complex: bool = True
 94    step_size: float
 95    memory_length: int
 96    gamma: float
 97    n_coeffs: int
 98
 99    def __init__(
100        self,
101        filter_order: int = 5,
102        step_size: float = 0.1,
103        memory_length: int = 2,
104        gamma: float = 1e-6,
105        w_init: Optional[Union[np.ndarray, list]] = None,
106    ) -> None:
107        super().__init__(filter_order, w_init=w_init)
108        self.step_size = float(step_size)
109        self.memory_length = int(memory_length)
110        self.gamma = float(gamma)
111        self.n_coeffs = int(filter_order + 1)
112
113    def optimize(
114        self,
115        input_signal: Union[np.ndarray, list],
116        desired_signal: Optional[Union[np.ndarray, list]] = None,
117        verbose: bool = False,
118        return_internal_states: bool = False,
119    ) -> OptimizationResult:
120        """
121        Executes the AP-CM adaptation loop over an input sequence.
122
123        Parameters
124        ----------
125        input_signal : array_like of complex
126            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
127        desired_signal : None, optional
128            Ignored. This is a blind algorithm: the reference is derived from
129            the output via unit-modulus normalization.
130        verbose : bool, optional
131            If True, prints the total runtime after completion.
132        return_internal_states : bool, optional
133            If True, includes the last internal states in ``result.extra``:
134            ``"last_update_factor"`` (``g(k)``) and ``"last_regressor_matrix"``
135            (``X(k)``).
136
137        Returns
138        -------
139        OptimizationResult
140            Result object with fields:
141            - outputs : ndarray of complex, shape ``(N,)``
142                Scalar output sequence, ``y[k] = y_ap(k)[0]``.
143            - errors : ndarray of complex, shape ``(N,)``
144                Scalar CM error sequence, ``e[k] = e_ap(k)[0]``.
145            - coefficients : ndarray of complex
146                Coefficient history recorded by the base class.
147            - error_type : str
148                Set to ``"blind_constant_modulus"``.
149            - extra : dict, optional
150                Present only if ``return_internal_states=True``.
151        """
152        tic: float = time()
153
154        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
155        n_samples: int = int(x.size)
156
157        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
158        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
159
160        L: int = int(self.memory_length)
161
162        regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, L + 1), dtype=complex)
163        I_reg: np.ndarray = (self.gamma * np.eye(L + 1)).astype(complex)
164
165        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
166        x_padded[self.filter_order:] = x
167
168        last_update_factor: Optional[np.ndarray] = None
169
170        for k in range(n_samples):
171            regressor_matrix[:, 1:] = regressor_matrix[:, :-1]
172            regressor_matrix[:, 0] = x_padded[k : k + self.filter_order + 1][::-1]
173
174            output_ap: np.ndarray = np.dot(np.conj(regressor_matrix).T, self.w)
175
176            abs_out: np.ndarray = np.abs(output_ap)
177            desired_level: np.ndarray = np.zeros_like(output_ap, dtype=complex)
178            np.divide(output_ap, abs_out, out=desired_level, where=abs_out > 1e-12)
179
180            error_ap: np.ndarray = desired_level - output_ap
181
182            phi: np.ndarray = np.dot(np.conj(regressor_matrix).T, regressor_matrix) + I_reg
183            update_factor: np.ndarray = np.linalg.solve(phi, error_ap)
184            last_update_factor = update_factor
185
186            self.w = self.w + self.step_size * np.dot(regressor_matrix, update_factor)
187
188            outputs[k] = output_ap[0]
189            errors[k] = error_ap[0]
190
191            self._record_history()
192
193        runtime_s: float = float(time() - tic)
194        if verbose:
195            print(f"[AffineProjectionCM] Completed in {runtime_s * 1000:.02f} ms")
196
197        extra: Optional[Dict[str, Any]] = None
198        if return_internal_states:
199            extra = {
200                "last_update_factor": last_update_factor,
201                "last_regressor_matrix": regressor_matrix.copy(),
202            }
203
204        return self._pack_results(
205            outputs=outputs,
206            errors=errors,
207            runtime_s=runtime_s,
208            error_type="blind_constant_modulus",
209            extra=extra,
210        )

Complex Affine-Projection Constant-Modulus (AP-CM) adaptive filter.

Blind affine-projection algorithm based on the constant-modulus criterion, following Diniz (Alg. 13.4). This implementation uses a unit-modulus reference (i.e., target magnitude equal to 1) obtained by normalizing the affine-projection output vector.

Parameters

filter_order : int, optional Adaptive FIR filter order M. The number of coefficients is M + 1. Default is 5. step_size : float, optional Adaptation step size mu. Default is 0.1. memory_length : int, optional Reuse factor L (number of past regressors reused). The affine- projection block size is therefore P = L + 1 columns. Default is 2. gamma : float, optional Levenberg-Marquardt regularization factor gamma used in the (L + 1) x (L + 1) normal-equation system for numerical stability. Default is 1e-6. w_init : array_like of complex, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

At iteration k, form the regressor block matrix:

  • X(k) ∈ C^{(M+1) x (L+1)}, whose columns are the most recent regressor vectors (newest in column 0).

The affine-projection output vector is:

$$y_{ap}(k) = X^H(k) w(k) \in \mathbb{C}^{L+1}.$$

This implementation uses a unit-circle projection (normalization) as the constant-modulus "reference":

$$d_{ap}(k) = \frac{y_{ap}(k)}{|y_{ap}(k)|},$$

applied element-wise, with a small threshold to avoid division by zero.

The error vector is:

$$e_{ap}(k) = d_{ap}(k) - y_{ap}(k).$$

The update direction g(k) is obtained by solving the regularized system:

$$(X^H(k) X(k) + \gamma I_{L+1})\, g(k) = e_{ap}(k),$$

and the coefficient update is:

$$w(k+1) = w(k) + \mu X(k) g(k).$$

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 13.4.


AffineProjectionCM( filter_order: int = 5, step_size: float = 0.1, memory_length: int = 2, gamma: float = 1e-06, w_init: Union[numpy.ndarray, list, NoneType] = None)
 99    def __init__(
100        self,
101        filter_order: int = 5,
102        step_size: float = 0.1,
103        memory_length: int = 2,
104        gamma: float = 1e-6,
105        w_init: Optional[Union[np.ndarray, list]] = None,
106    ) -> None:
107        super().__init__(filter_order, w_init=w_init)
108        self.step_size = float(step_size)
109        self.memory_length = int(memory_length)
110        self.gamma = float(gamma)
111        self.n_coeffs = int(filter_order + 1)
supports_complex: bool = True
step_size: float
memory_length: int
gamma: float
n_coeffs: int
def optimize( self, input_signal: Union[numpy.ndarray, list], desired_signal: Union[numpy.ndarray, list, NoneType] = None, verbose: bool = False, return_internal_states: bool = False) -> pydaptivefiltering.base.OptimizationResult:
113    def optimize(
114        self,
115        input_signal: Union[np.ndarray, list],
116        desired_signal: Optional[Union[np.ndarray, list]] = None,
117        verbose: bool = False,
118        return_internal_states: bool = False,
119    ) -> OptimizationResult:
120        """
121        Executes the AP-CM adaptation loop over an input sequence.
122
123        Parameters
124        ----------
125        input_signal : array_like of complex
126            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
127        desired_signal : None, optional
128            Ignored. This is a blind algorithm: the reference is derived from
129            the output via unit-modulus normalization.
130        verbose : bool, optional
131            If True, prints the total runtime after completion.
132        return_internal_states : bool, optional
133            If True, includes the last internal states in ``result.extra``:
134            ``"last_update_factor"`` (``g(k)``) and ``"last_regressor_matrix"``
135            (``X(k)``).
136
137        Returns
138        -------
139        OptimizationResult
140            Result object with fields:
141            - outputs : ndarray of complex, shape ``(N,)``
142                Scalar output sequence, ``y[k] = y_ap(k)[0]``.
143            - errors : ndarray of complex, shape ``(N,)``
144                Scalar CM error sequence, ``e[k] = e_ap(k)[0]``.
145            - coefficients : ndarray of complex
146                Coefficient history recorded by the base class.
147            - error_type : str
148                Set to ``"blind_constant_modulus"``.
149            - extra : dict, optional
150                Present only if ``return_internal_states=True``.
151        """
152        tic: float = time()
153
154        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
155        n_samples: int = int(x.size)
156
157        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
158        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
159
160        L: int = int(self.memory_length)
161
162        regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, L + 1), dtype=complex)
163        I_reg: np.ndarray = (self.gamma * np.eye(L + 1)).astype(complex)
164
165        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
166        x_padded[self.filter_order:] = x
167
168        last_update_factor: Optional[np.ndarray] = None
169
170        for k in range(n_samples):
171            regressor_matrix[:, 1:] = regressor_matrix[:, :-1]
172            regressor_matrix[:, 0] = x_padded[k : k + self.filter_order + 1][::-1]
173
174            output_ap: np.ndarray = np.dot(np.conj(regressor_matrix).T, self.w)
175
176            abs_out: np.ndarray = np.abs(output_ap)
177            desired_level: np.ndarray = np.zeros_like(output_ap, dtype=complex)
178            np.divide(output_ap, abs_out, out=desired_level, where=abs_out > 1e-12)
179
180            error_ap: np.ndarray = desired_level - output_ap
181
182            phi: np.ndarray = np.dot(np.conj(regressor_matrix).T, regressor_matrix) + I_reg
183            update_factor: np.ndarray = np.linalg.solve(phi, error_ap)
184            last_update_factor = update_factor
185
186            self.w = self.w + self.step_size * np.dot(regressor_matrix, update_factor)
187
188            outputs[k] = output_ap[0]
189            errors[k] = error_ap[0]
190
191            self._record_history()
192
193        runtime_s: float = float(time() - tic)
194        if verbose:
195            print(f"[AffineProjectionCM] Completed in {runtime_s * 1000:.02f} ms")
196
197        extra: Optional[Dict[str, Any]] = None
198        if return_internal_states:
199            extra = {
200                "last_update_factor": last_update_factor,
201                "last_regressor_matrix": regressor_matrix.copy(),
202            }
203
204        return self._pack_results(
205            outputs=outputs,
206            errors=errors,
207            runtime_s=runtime_s,
208            error_type="blind_constant_modulus",
209            extra=extra,
210        )

Executes the AP-CM adaptation loop over an input sequence.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : None, optional Ignored. This is a blind algorithm: the reference is derived from the output via unit-modulus normalization. verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes the last internal states in result.extra: "last_update_factor" (g(k)) and "last_regressor_matrix" (X(k)).

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Scalar output sequence, y[k] = y_ap(k)[0]. - errors : ndarray of complex, shape (N,) Scalar CM error sequence, e[k] = e_ap(k)[0]. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "blind_constant_modulus". - extra : dict, optional Present only if return_internal_states=True.

class CMA(pydaptivefiltering.AdaptiveFilter):
 25class CMA(AdaptiveFilter):
 26    """
 27    Constant-Modulus Algorithm (CMA) for blind adaptive filtering (complex-valued).
 28
 29    The CMA adapts an FIR equalizer to produce an output with (approximately)
 30    constant modulus, making it useful for blind equalization of constant-envelope
 31    and near-constant-envelope modulations (e.g., PSK and some QAM regimes).
 32
 33    This implementation follows Diniz (Alg. 13.2) using the classical CMA(2,2)
 34    instantaneous gradient approximation.
 35
 36    Parameters
 37    ----------
 38    filter_order : int, optional
 39        FIR filter order ``M``. The number of coefficients is ``M + 1``.
 40        Default is 5.
 41    step_size : float, optional
 42        Adaptation step size ``mu``. Default is 0.01.
 43    w_init : array_like of complex, optional
 44        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 45        initializes with zeros.
 46
 47    Notes
 48    -----
 49    Let the regressor vector be ``x_k = [x[k], x[k-1], ..., x[k-M]]^T`` and the
 50    filter output:
 51
 52    .. math::
 53        y(k) = w^H(k) x_k.
 54
 55    CMA(2,2) is commonly derived from minimizing the instantaneous cost:
 56
 57    .. math::
 58        J(k) = \\left(|y(k)|^2 - R_2\\right)^2,
 59
 60    where ``R2`` is the dispersion constant. Using an instantaneous gradient
 61    approximation, define the scalar error:
 62
 63    .. math::
 64        e(k) = |y(k)|^2 - R_2,
 65
 66    and the (complex) gradient factor:
 67
 68    .. math::
 69        \\phi(k) = 2\\, e(k)\\, y^*(k).
 70
 71    The coefficient update is then:
 72
 73    .. math::
 74        w(k+1) = w(k) - \\mu\\, \\phi(k)\\, x_k.
 75
 76    Dispersion constant
 77    ~~~~~~~~~~~~~~~~~~~
 78    In theory, ``R2`` depends on the source constellation statistics and is
 79    often written as:
 80
 81    .. math::
 82        R_2 = \\frac{\\mathbb{E}[|s(k)|^4]}{\\mathbb{E}[|s(k)|^2]}.
 83
 84    In practice, when the source ``s(k)`` is not available (blind setting),
 85    ``R2`` is typically chosen from prior knowledge of the modulation or
 86    estimated from a proxy sequence. If this implementation estimates ``R2``
 87    from data, it should specify which sequence is used (e.g., input vs output).
 88
 89    References
 90    ----------
 91    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 92       Implementation*, 5th ed., Algorithm 13.2.
 93    """
 94    supports_complex: bool = True
 95    step_size: float
 96    n_coeffs: int
 97
 98    def __init__(
 99        self,
100        filter_order: int = 5,
101        step_size: float = 0.01,
102        w_init: Optional[Union[np.ndarray, list]] = None,
103    ) -> None:
104        super().__init__(filter_order, w_init=w_init)
105        self.step_size = float(step_size)
106        self.n_coeffs = int(filter_order + 1)
107
108    def optimize(
109        self,
110        input_signal: Union[np.ndarray, list],
111        desired_signal: Optional[Union[np.ndarray, list]] = None,
112        verbose: bool = False,
113        return_internal_states: bool = False,
114        safe_eps: float = 1e-12,
115    ) -> OptimizationResult:
116        """
117        Executes the CMA adaptation loop over an input sequence.
118
119        Parameters
120        ----------
121        input_signal : array_like of complex
122            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
123        desired_signal : None, optional
124            Ignored. This is a blind algorithm: it does not require a desired
125            reference signal.
126        verbose : bool, optional
127            If True, prints the total runtime after completion.
128        return_internal_states : bool, optional
129            If True, includes internal quantities in ``result.extra`` (e.g.,
130            the dispersion constant ``R2`` and/or the last/trajectory of
131            ``phi(k)`` depending on the implementation).
132        safe_eps : float, optional
133            Small epsilon used to avoid division by zero if ``R2`` is estimated
134            from sample moments. Default is 1e-12.
135
136        Returns
137        -------
138        OptimizationResult
139            Result object with fields:
140            - outputs : ndarray of complex, shape ``(N,)``
141                Output sequence ``y[k]``.
142            - errors : ndarray of float or complex, shape ``(N,)``
143                CMA error sequence ``e[k] = |y(k)|^2 - R2`` (usually real-valued).
144            - coefficients : ndarray of complex
145                Coefficient history recorded by the base class.
146            - error_type : str
147                Set to ``"blind_constant_modulus"``.
148            - extra : dict, optional
149                Present only if ``return_internal_states=True``.
150        """
151        tic: float = time()
152
153        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
154        n_samples: int = int(x.size)
155
156        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
157        errors: np.ndarray = np.zeros(n_samples, dtype=float)
158        
159        denom: float = float(np.mean(np.abs(x) ** 2))
160        if denom < safe_eps:
161            desired_level: float = 0.0
162        else:
163            desired_level = float(np.mean(np.abs(x) ** 4) / (denom + safe_eps))
164
165        phi_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
166
167        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
168        x_padded[self.filter_order:] = x
169
170        for k in range(n_samples):
171            x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]
172
173            y_k: complex = complex(np.dot(np.conj(self.w), x_k))
174            outputs[k] = y_k
175
176            e_k: float = float((np.abs(y_k) ** 2) - desired_level)
177            errors[k] = e_k
178
179            phi_k: complex = complex(2.0 * e_k * np.conj(y_k))
180            if return_internal_states and phi_track is not None:
181                phi_track[k] = phi_k
182
183            self.w = self.w - self.step_size * phi_k * x_k
184            self._record_history()
185
186        runtime_s: float = float(time() - tic)
187        if verbose:
188            print(f"[CMA] Completed in {runtime_s * 1000:.02f} ms")
189
190        extra: Optional[Dict[str, Any]] = None
191        if return_internal_states:
192            extra = {
193                "dispersion_constant": desired_level,
194                "instantaneous_phi": phi_track,
195            }
196
197        return self._pack_results(
198            outputs=outputs,
199            errors=errors,
200            runtime_s=runtime_s,
201            error_type="blind_constant_modulus",
202            extra=extra,
203        )

Constant-Modulus Algorithm (CMA) for blind adaptive filtering (complex-valued).

The CMA adapts an FIR equalizer to produce an output with (approximately) constant modulus, making it useful for blind equalization of constant-envelope and near-constant-envelope modulations (e.g., PSK and some QAM regimes).

This implementation follows Diniz (Alg. 13.2) using the classical CMA(2,2) instantaneous gradient approximation.

Parameters

filter_order : int, optional FIR filter order M. The number of coefficients is M + 1. Default is 5. step_size : float, optional Adaptation step size mu. Default is 0.01. w_init : array_like of complex, optional Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

Let the regressor vector be x_k = [x[k], x[k-1], ..., x[k-M]]^T and the filter output:

$$y(k) = w^H(k) x_k.$$

CMA(2,2) is commonly derived from minimizing the instantaneous cost:

$$J(k) = \left(|y(k)|^2 - R_2\right)^2,$$

where R2 is the dispersion constant. Using an instantaneous gradient approximation, define the scalar error:

$$e(k) = |y(k)|^2 - R_2,$$

and the (complex) gradient factor:

$$\phi(k) = 2\, e(k)\, y^*(k).$$

The coefficient update is then:

$$w(k+1) = w(k) - \mu\, \phi(k)\, x_k.$$

Dispersion constant: In theory, R2 depends on the source constellation statistics and is often written as:

$$R_2 = \frac{\mathbb{E}[|s(k)|^4]}{\mathbb{E}[|s(k)|^2]}.$$

In practice, when the source s(k) is not available (blind setting), R2 is typically chosen from prior knowledge of the modulation or estimated from a proxy sequence. If this implementation estimates R2 from data, it should specify which sequence is used (e.g., input vs output).

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 13.2.

CMA( filter_order: int = 5, step_size: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None)
 98    def __init__(
 99        self,
100        filter_order: int = 5,
101        step_size: float = 0.01,
102        w_init: Optional[Union[np.ndarray, list]] = None,
103    ) -> None:
104        super().__init__(filter_order, w_init=w_init)
105        self.step_size = float(step_size)
106        self.n_coeffs = int(filter_order + 1)
supports_complex: bool = True
step_size: float
n_coeffs: int
def optimize( self, input_signal: Union[numpy.ndarray, list], desired_signal: Union[numpy.ndarray, list, NoneType] = None, verbose: bool = False, return_internal_states: bool = False, safe_eps: float = 1e-12) -> pydaptivefiltering.base.OptimizationResult:
108    def optimize(
109        self,
110        input_signal: Union[np.ndarray, list],
111        desired_signal: Optional[Union[np.ndarray, list]] = None,
112        verbose: bool = False,
113        return_internal_states: bool = False,
114        safe_eps: float = 1e-12,
115    ) -> OptimizationResult:
116        """
117        Executes the CMA adaptation loop over an input sequence.
118
119        Parameters
120        ----------
121        input_signal : array_like of complex
122            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
123        desired_signal : None, optional
124            Ignored. This is a blind algorithm: it does not require a desired
125            reference signal.
126        verbose : bool, optional
127            If True, prints the total runtime after completion.
128        return_internal_states : bool, optional
129            If True, includes internal quantities in ``result.extra`` (e.g.,
130            the dispersion constant ``R2`` and/or the last/trajectory of
131            ``phi(k)`` depending on the implementation).
132        safe_eps : float, optional
133            Small epsilon used to avoid division by zero if ``R2`` is estimated
134            from sample moments. Default is 1e-12.
135
136        Returns
137        -------
138        OptimizationResult
139            Result object with fields:
140            - outputs : ndarray of complex, shape ``(N,)``
141                Output sequence ``y[k]``.
142            - errors : ndarray of float or complex, shape ``(N,)``
143                CMA error sequence ``e[k] = |y(k)|^2 - R2`` (usually real-valued).
144            - coefficients : ndarray of complex
145                Coefficient history recorded by the base class.
146            - error_type : str
147                Set to ``"blind_constant_modulus"``.
148            - extra : dict, optional
149                Present only if ``return_internal_states=True``.
150        """
151        tic: float = time()
152
153        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
154        n_samples: int = int(x.size)
155
156        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
157        errors: np.ndarray = np.zeros(n_samples, dtype=float)
158        
159        denom: float = float(np.mean(np.abs(x) ** 2))
160        if denom < safe_eps:
161            desired_level: float = 0.0
162        else:
163            desired_level = float(np.mean(np.abs(x) ** 4) / (denom + safe_eps))
164
165        phi_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
166
167        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
168        x_padded[self.filter_order:] = x
169
170        for k in range(n_samples):
171            x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]
172
173            y_k: complex = complex(np.dot(np.conj(self.w), x_k))
174            outputs[k] = y_k
175
176            e_k: float = float((np.abs(y_k) ** 2) - desired_level)
177            errors[k] = e_k
178
179            phi_k: complex = complex(2.0 * e_k * np.conj(y_k))
180            if return_internal_states and phi_track is not None:
181                phi_track[k] = phi_k
182
183            self.w = self.w - self.step_size * phi_k * x_k
184            self._record_history()
185
186        runtime_s: float = float(time() - tic)
187        if verbose:
188            print(f"[CMA] Completed in {runtime_s * 1000:.02f} ms")
189
190        extra: Optional[Dict[str, Any]] = None
191        if return_internal_states:
192            extra = {
193                "dispersion_constant": desired_level,
194                "instantaneous_phi": phi_track,
195            }
196
197        return self._pack_results(
198            outputs=outputs,
199            errors=errors,
200            runtime_s=runtime_s,
201            error_type="blind_constant_modulus",
202            extra=extra,
203        )

Executes the CMA adaptation loop over an input sequence.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : None, optional Ignored. This is a blind algorithm: it does not require a desired reference signal. verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes internal quantities in result.extra (e.g., the dispersion constant R2 and/or the last/trajectory of phi(k) depending on the implementation). safe_eps : float, optional Small epsilon used to avoid division by zero if R2 is estimated from sample moments. Default is 1e-12.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Output sequence y[k]. - errors : ndarray of float or complex, shape (N,) CMA error sequence e[k] = |y(k)|^2 - R2 (usually real-valued). - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "blind_constant_modulus". - extra : dict, optional Present only if return_internal_states=True.

class Godard(pydaptivefiltering.AdaptiveFilter):
 25class Godard(AdaptiveFilter):
 26    """
 27    Godard blind adaptive algorithm (complex-valued).
 28
 29    The Godard criterion generalizes constant-modulus equalization by using
 30    exponents ``p`` and ``q`` in a family of dispersion-based cost functions.
 31    It is commonly used for blind channel equalization and includes CMA(2,2)
 32    as a special case.
 33
 34    This implementation follows Diniz (Alg. 13.1) and estimates the dispersion
 35    constant ``R_q`` directly from the *input sequence* via sample moments.
 36
 37    Parameters
 38    ----------
 39    filter_order : int, optional
 40        FIR filter order ``M``. The number of coefficients is ``M + 1``.
 41        Default is 5.
 42    step_size : float, optional
 43        Adaptation step size ``mu``. Default is 0.01.
 44    p_exponent : int, optional
 45        Exponent ``p`` used in the Godard cost / gradient factor. Default is 2.
 46    q_exponent : int, optional
 47        Exponent ``q`` used in the modulus term. Default is 2.
 48    w_init : array_like of complex, optional
 49        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 50        initializes with zeros.
 51
 52    Notes
 53    -----
 54    Let the regressor vector be ``x_k = [x[k], x[k-1], ..., x[k-M]]^T`` and the
 55    output:
 56
 57    .. math::
 58        y(k) = w^H(k) x_k.
 59
 60    Define the dispersion error (scalar):
 61
 62    .. math::
 63        e(k) = |y(k)|^q - R_q.
 64
 65    In this implementation, the dispersion constant is estimated from the input
 66    using sample moments:
 67
 68    .. math::
 69        R_q \\approx \\frac{\\mathbb{E}[|x|^{2q}]}{\\mathbb{E}[|x|^q]}
 70        \\approx \\frac{\\frac{1}{N}\\sum_k |x(k)|^{2q}}
 71                     {\\frac{1}{N}\\sum_k |x(k)|^q},
 72
 73    with a small ``safe_eps`` to prevent division by zero.
 74
 75    The instantaneous complex gradient factor is computed as:
 76
 77    .. math::
 78        \\phi(k) = p\\,q\\, e(k)^{p-1}\\, |y(k)|^{q-2}\\, y^*(k),
 79
 80    and the coefficient update used here is:
 81
 82    .. math::
 83        w(k+1) = w(k) - \\frac{\\mu}{2}\\, \\phi(k)\\, x_k.
 84
 85    Numerical stability
 86    ~~~~~~~~~~~~~~~~~~~
 87    When ``|y(k)|`` is very small, the term ``|y(k)|^{q-2}`` can be ill-defined
 88    for ``q < 2`` or can amplify noise. This implementation sets ``phi(k)=0``
 89    when ``|y(k)| <= safe_eps``.
 90
 91    References
 92    ----------
 93    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 94       Implementation*, 5th ed., Algorithm 13.1.
 95    """
 96
 97    supports_complex: bool = True
 98    step_size: float
 99    p: int
100    q: int
101    n_coeffs: int
102
103    def __init__(
104        self,
105        filter_order: int = 5,
106        step_size: float = 0.01,
107        p_exponent: int = 2,
108        q_exponent: int = 2,
109        w_init: Optional[Union[np.ndarray, list]] = None,
110    ) -> None:
111        super().__init__(filter_order, w_init=w_init)
112        self.step_size = float(step_size)
113        self.p = int(p_exponent)
114        self.q = int(q_exponent)
115        self.n_coeffs = int(filter_order + 1)
116
117    def optimize(
118        self,
119        input_signal: Union[np.ndarray, list],
120        desired_signal: Optional[Union[np.ndarray, list]] = None,
121        verbose: bool = False,
122        return_internal_states: bool = False,
123        safe_eps: float = 1e-12,
124    ) -> OptimizationResult:
125        """
126        Executes the Godard adaptation loop over an input sequence.
127
128        Parameters
129        ----------
130        input_signal : array_like of complex
131            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
132        desired_signal : None, optional
133            Ignored. This is a blind algorithm: no desired reference is used.
134        verbose : bool, optional
135            If True, prints the total runtime after completion.
136        return_internal_states : bool, optional
137            If True, includes internal signals in ``result.extra``:
138            ``"dispersion_constant"`` (estimated ``R_q``) and ``"phi_gradient"``
139            (trajectory of ``phi(k)`` with shape ``(N,)``).
140        safe_eps : float, optional
141            Small epsilon used to avoid division by zero when estimating
142            ``R_q`` and to gate the computation of ``phi(k)`` when ``|y(k)|`` is
143            close to zero. Default is 1e-12.
144
145        Returns
146        -------
147        OptimizationResult
148            Result object with fields:
149            - outputs : ndarray of complex, shape ``(N,)``
150                Output sequence ``y[k]``.
151            - errors : ndarray of float, shape ``(N,)``
152                Dispersion error sequence ``e[k] = |y(k)|^q - R_q``.
153            - coefficients : ndarray of complex
154                Coefficient history recorded by the base class.
155            - error_type : str
156                Set to ``"blind_godard"``.
157            - extra : dict, optional
158                Present only if ``return_internal_states=True``.
159        """
160        tic: float = time()
161
162        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
163        n_samples: int = int(x.size)
164
165        num: float = float(np.mean(np.abs(x) ** (2 * self.q)))
166        den: float = float(np.mean(np.abs(x) ** self.q))
167        desired_level: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0
168
169        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
170        errors: np.ndarray = np.zeros(n_samples, dtype=float)
171
172        phi_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
173
174        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
175        x_padded[self.filter_order:] = x
176
177        for k in range(n_samples):
178            x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]
179
180            y_k: complex = complex(np.dot(np.conj(self.w), x_k))
181            outputs[k] = y_k
182
183            e_k: float = float((np.abs(y_k) ** self.q) - desired_level)
184            errors[k] = e_k
185
186            if np.abs(y_k) > safe_eps:
187                phi_k: complex = complex(
188                    self.p
189                    * self.q
190                    * (e_k ** (self.p - 1))
191                    * (np.abs(y_k) ** (self.q - 2))
192                    * np.conj(y_k)
193                )
194            else:
195                phi_k = 0.0 + 0.0j
196
197            if return_internal_states and phi_track is not None:
198                phi_track[k] = phi_k
199
200            self.w = self.w - (self.step_size * phi_k * x_k) / 2.0
201            self._record_history()
202
203        runtime_s: float = float(time() - tic)
204        if verbose:
205            print(f"[Godard] Completed in {runtime_s * 1000:.02f} ms")
206
207        extra: Optional[Dict[str, Any]] = None
208        if return_internal_states:
209            extra = {
210                "phi_gradient": phi_track,
211                "dispersion_constant": desired_level,
212            }
213
214        return self._pack_results(
215            outputs=outputs,
216            errors=errors,
217            runtime_s=runtime_s,
218            error_type="blind_godard",
219            extra=extra,
220        )

Godard blind adaptive algorithm (complex-valued).

The Godard criterion generalizes constant-modulus equalization by using exponents p and q in a family of dispersion-based cost functions. It is commonly used for blind channel equalization and includes CMA(2,2) as a special case.

This implementation follows Diniz (Alg. 13.1) and estimates the dispersion constant R_q directly from the input sequence via sample moments.

Parameters

filter_order : int, optional — FIR filter order M. The number of coefficients is M + 1. Default is 5.

step_size : float, optional — Adaptation step size mu. Default is 0.01.

p_exponent : int, optional — Exponent p used in the Godard cost / gradient factor. Default is 2.

q_exponent : int, optional — Exponent q used in the modulus term. Default is 2.

w_init : array_like of complex, optional — Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

Let the regressor vector be x_k = [x[k], x[k-1], ..., x[k-M]]^T and the output:

$$y(k) = w^H(k) x_k.$$

Define the dispersion error (scalar):

$$e(k) = |y(k)|^q - R_q.$$

In this implementation, the dispersion constant is estimated from the input using sample moments:

$$R_q \approx \frac{\mathbb{E}[|x|^{2q}]}{\mathbb{E}[|x|^q]} \approx \frac{\frac{1}{N}\sum_k |x(k)|^{2q}} {\frac{1}{N}\sum_k |x(k)|^q},$$

with a small safe_eps to prevent division by zero.

The instantaneous complex gradient factor is computed as:

$$\phi(k) = p\,q\, e(k)^{p-1}\, |y(k)|^{q-2}\, y^*(k),$$

and the coefficient update used here is:

$$w(k+1) = w(k) - \frac{\mu}{2}\, \phi(k)\, x_k.$$

Numerical stability: When |y(k)| is very small, the term |y(k)|^{q-2} can be ill-defined for q < 2 or can amplify noise. This implementation sets phi(k) = 0 when |y(k)| <= safe_eps.

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 13.1.

Godard( filter_order: int = 5, step_size: float = 0.01, p_exponent: int = 2, q_exponent: int = 2, w_init: Union[numpy.ndarray, list, NoneType] = None)
103    def __init__(
104        self,
105        filter_order: int = 5,
106        step_size: float = 0.01,
107        p_exponent: int = 2,
108        q_exponent: int = 2,
109        w_init: Optional[Union[np.ndarray, list]] = None,
110    ) -> None:
111        super().__init__(filter_order, w_init=w_init)
112        self.step_size = float(step_size)
113        self.p = int(p_exponent)
114        self.q = int(q_exponent)
115        self.n_coeffs = int(filter_order + 1)
supports_complex: bool = True
step_size: float
p: int
q: int
n_coeffs: int
def optimize( self, input_signal: Union[numpy.ndarray, list], desired_signal: Union[numpy.ndarray, list, NoneType] = None, verbose: bool = False, return_internal_states: bool = False, safe_eps: float = 1e-12) -> pydaptivefiltering.base.OptimizationResult:
117    def optimize(
118        self,
119        input_signal: Union[np.ndarray, list],
120        desired_signal: Optional[Union[np.ndarray, list]] = None,
121        verbose: bool = False,
122        return_internal_states: bool = False,
123        safe_eps: float = 1e-12,
124    ) -> OptimizationResult:
125        """
126        Executes the Godard adaptation loop over an input sequence.
127
128        Parameters
129        ----------
130        input_signal : array_like of complex
131            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
132        desired_signal : None, optional
133            Ignored. This is a blind algorithm: no desired reference is used.
134        verbose : bool, optional
135            If True, prints the total runtime after completion.
136        return_internal_states : bool, optional
137            If True, includes internal signals in ``result.extra``:
138            ``"dispersion_constant"`` (estimated ``R_q``) and ``"phi_gradient"``
139            (trajectory of ``phi(k)`` with shape ``(N,)``).
140        safe_eps : float, optional
141            Small epsilon used to avoid division by zero when estimating
142            ``R_q`` and to gate the computation of ``phi(k)`` when ``|y(k)|`` is
143            close to zero. Default is 1e-12.
144
145        Returns
146        -------
147        OptimizationResult
148            Result object with fields:
149            - outputs : ndarray of complex, shape ``(N,)``
150                Output sequence ``y[k]``.
151            - errors : ndarray of float, shape ``(N,)``
152                Dispersion error sequence ``e[k] = |y(k)|^q - R_q``.
153            - coefficients : ndarray of complex
154                Coefficient history recorded by the base class.
155            - error_type : str
156                Set to ``"blind_godard"``.
157            - extra : dict, optional
158                Present only if ``return_internal_states=True``.
159        """
160        tic: float = time()
161
162        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
163        n_samples: int = int(x.size)
164
165        num: float = float(np.mean(np.abs(x) ** (2 * self.q)))
166        den: float = float(np.mean(np.abs(x) ** self.q))
167        desired_level: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0
168
169        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
170        errors: np.ndarray = np.zeros(n_samples, dtype=float)
171
172        phi_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
173
174        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
175        x_padded[self.filter_order:] = x
176
177        for k in range(n_samples):
178            x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]
179
180            y_k: complex = complex(np.dot(np.conj(self.w), x_k))
181            outputs[k] = y_k
182
183            e_k: float = float((np.abs(y_k) ** self.q) - desired_level)
184            errors[k] = e_k
185
186            if np.abs(y_k) > safe_eps:
187                phi_k: complex = complex(
188                    self.p
189                    * self.q
190                    * (e_k ** (self.p - 1))
191                    * (np.abs(y_k) ** (self.q - 2))
192                    * np.conj(y_k)
193                )
194            else:
195                phi_k = 0.0 + 0.0j
196
197            if return_internal_states and phi_track is not None:
198                phi_track[k] = phi_k
199
200            self.w = self.w - (self.step_size * phi_k * x_k) / 2.0
201            self._record_history()
202
203        runtime_s: float = float(time() - tic)
204        if verbose:
205            print(f"[Godard] Completed in {runtime_s * 1000:.02f} ms")
206
207        extra: Optional[Dict[str, Any]] = None
208        if return_internal_states:
209            extra = {
210                "phi_gradient": phi_track,
211                "dispersion_constant": desired_level,
212            }
213
214        return self._pack_results(
215            outputs=outputs,
216            errors=errors,
217            runtime_s=runtime_s,
218            error_type="blind_godard",
219            extra=extra,
220        )

Executes the Godard adaptation loop over an input sequence.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : None, optional Ignored. This is a blind algorithm: no desired reference is used. verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes internal signals in result.extra: "dispersion_constant" (estimated R_q) and "phi_gradient" (trajectory of phi(k) with shape (N,)). safe_eps : float, optional Small epsilon used to avoid division by zero when estimating R_q and to gate the computation of phi(k) when |y(k)| is close to zero. Default is 1e-12.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Output sequence y[k]. - errors : ndarray of float, shape (N,) Dispersion error sequence e[k] = |y(k)|^q - R_q. - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "blind_godard". - extra : dict, optional Present only if return_internal_states=True.

class Sato(pydaptivefiltering.AdaptiveFilter):
 25class Sato(AdaptiveFilter):
 26    """
 27    Sato blind adaptive algorithm (complex-valued).
 28
 29    The Sato criterion is an early blind equalization method particularly
 30    associated with multilevel PAM/QAM-type signals. It adapts an FIR equalizer
 31    by pulling the output toward a fixed magnitude level through the complex
 32    sign function, using a dispersion constant ``zeta``.
 33
 34    This implementation follows Diniz (Alg. 13.3) and estimates ``zeta`` from
 35    the *input sequence* via sample moments.
 36
 37    Parameters
 38    ----------
 39    filter_order : int, optional
 40        FIR filter order ``M``. The number of coefficients is ``M + 1``.
 41        Default is 5.
 42    step_size : float, optional
 43        Adaptation step size ``mu``. Default is 0.01.
 44    w_init : array_like of complex, optional
 45        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
 46        initializes with zeros.
 47
 48    Notes
 49    -----
 50    Let the regressor vector be ``x_k = [x[k], x[k-1], ..., x[k-M]]^T`` and the
 51    output:
 52
 53    .. math::
 54        y(k) = w^H(k) x_k.
 55
 56    Define the complex sign function (unit-circle projection):
 57
 58    .. math::
 59        \\mathrm{csgn}(y) =
 60        \\begin{cases}
 61        \\dfrac{y}{|y|}, & |y| > 0 \\\\
 62        0, & |y| = 0
 63        \\end{cases}
 64
 65    The Sato error is:
 66
 67    .. math::
 68        e(k) = y(k) - \\zeta\\, \\mathrm{csgn}(y(k)).
 69
 70    The coefficient update used here is:
 71
 72    .. math::
 73        w(k+1) = w(k) - \\mu\\, e^*(k)\\, x_k.
 74
 75    Dispersion constant
 76    ~~~~~~~~~~~~~~~~~~~
 77    In this implementation, the dispersion constant is estimated from the input
 78    using sample moments:
 79
 80    .. math::
 81        \\zeta \\approx \\frac{\\mathbb{E}[|x|^2]}{\\mathbb{E}[|x|]}
 82        \\approx \\frac{\\frac{1}{N}\\sum_k |x(k)|^2}
 83                     {\\frac{1}{N}\\sum_k |x(k)|},
 84
 85    with a small ``safe_eps`` to avoid division by zero.
 86
 87    Numerical stability
 88    ~~~~~~~~~~~~~~~~~~~
 89    To avoid instability when ``|y(k)|`` is very small, this implementation
 90    sets ``csgn(y(k)) = 0`` when ``|y(k)| <= safe_eps``.
 91
 92    References
 93    ----------
 94    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
 95       Implementation*, 5th ed., Algorithm 13.3.
 96    """
 97
 98    supports_complex: bool = True
 99    step_size: float
100    n_coeffs: int
101
102    def __init__(
103        self,
104        filter_order: int = 5,
105        step_size: float = 0.01,
106        w_init: Optional[Union[np.ndarray, list]] = None,
107    ) -> None:
108        super().__init__(filter_order, w_init=w_init)
109        self.step_size = float(step_size)
110        self.n_coeffs = int(filter_order + 1)
111
112    def optimize(
113        self,
114        input_signal: Union[np.ndarray, list],
115        desired_signal: Optional[Union[np.ndarray, list]] = None,
116        verbose: bool = False,
117        return_internal_states: bool = False,
118        safe_eps: float = 1e-12,
119    ) -> OptimizationResult:
120        """
121        Executes the Sato adaptation loop over an input sequence.
122
123        Parameters
124        ----------
125        input_signal : array_like of complex
126            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
127        desired_signal : None, optional
128            Ignored. This is a blind algorithm: no desired reference is used.
129        verbose : bool, optional
130            If True, prints the total runtime after completion.
131        return_internal_states : bool, optional
132            If True, includes internal signals in ``result.extra``:
133            ``"dispersion_constant"`` (estimated ``zeta``) and
134            ``"sato_sign_track"`` (trajectory of ``csgn(y(k))`` with shape
135            ``(N,)``).
136        safe_eps : float, optional
137            Small epsilon used to avoid division by zero when estimating
138            ``zeta`` and to gate the computation of ``csgn(y(k))`` when ``|y(k)|``
139            is close to zero. Default is 1e-12.
140
141        Returns
142        -------
143        OptimizationResult
144            Result object with fields:
145            - outputs : ndarray of complex, shape ``(N,)``
146                Output sequence ``y[k]``.
147            - errors : ndarray of complex, shape ``(N,)``
148                Sato error sequence ``e[k] = y(k) - zeta*csgn(y(k))``.
149            - coefficients : ndarray of complex
150                Coefficient history recorded by the base class.
151            - error_type : str
152                Set to ``"blind_sato"``.
153            - extra : dict, optional
154                Present only if ``return_internal_states=True``.
155        """
156        tic: float = time()
157
158        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
159        n_samples: int = int(x.size)
160
161        num: float = float(np.mean(np.abs(x) ** 2))
162        den: float = float(np.mean(np.abs(x)))
163        dispersion_constant: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0
164
165        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
166        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
167
168        sign_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
169
170        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
171        x_padded[self.filter_order:] = x
172
173        for k in range(n_samples):
174            x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]
175
176            y_k: complex = complex(np.dot(np.conj(self.w), x_k))
177            outputs[k] = y_k
178
179            mag: float = float(np.abs(y_k))
180            sato_sign: complex = (y_k / mag) if mag > safe_eps else (0.0 + 0.0j)
181
182            if return_internal_states and sign_track is not None:
183                sign_track[k] = sato_sign
184
185            e_k: complex = y_k - sato_sign * dispersion_constant
186            errors[k] = e_k
187
188            self.w = self.w - self.step_size * np.conj(e_k) * x_k
189            self._record_history()
190
191        runtime_s: float = float(time() - tic)
192        if verbose:
193            print(f"[Sato] Completed in {runtime_s * 1000:.02f} ms")
194
195        extra: Optional[Dict[str, Any]] = None
196        if return_internal_states:
197            extra = {
198                "sato_sign_track": sign_track,
199                "dispersion_constant": dispersion_constant,
200            }
201
202        return self._pack_results(
203            outputs=outputs,
204            errors=errors,
205            runtime_s=runtime_s,
206            error_type="blind_sato",
207            extra=extra,
208        )

Sato blind adaptive algorithm (complex-valued).

The Sato criterion is an early blind equalization method particularly associated with multilevel PAM/QAM-type signals. It adapts an FIR equalizer by pulling the output toward a fixed magnitude level through the complex sign function, using a dispersion constant zeta.

This implementation follows Diniz (Alg. 13.3) and estimates zeta from the input sequence via sample moments.

Parameters

filter_order : int, optional — FIR filter order M. The number of coefficients is M + 1. Default is 5.

step_size : float, optional — Adaptation step size mu. Default is 0.01.

w_init : array_like of complex, optional — Initial coefficient vector w(0) with shape (M + 1,). If None, initializes with zeros.

Notes

Let the regressor vector be x_k = [x[k], x[k-1], ..., x[k-M]]^T and the output:

$$y(k) = w^H(k) x_k.$$

Define the complex sign function (unit-circle projection):

$$\mathrm{csgn}(y) = \begin{cases} \dfrac{y}{|y|}, & |y| > 0 \\ 0, & |y| = 0 \end{cases}$$

The Sato error is:

$$e(k) = y(k) - \zeta\, \mathrm{csgn}(y(k)).$$

The coefficient update used here is:

$$w(k+1) = w(k) - \mu\, e^*(k)\, x_k.$$

Dispersion constant: In this implementation, the dispersion constant is estimated from the input using sample moments:

$$\zeta \approx \frac{\mathbb{E}[|x|^2]}{\mathbb{E}[|x|]} \approx \frac{\frac{1}{N}\sum_k |x(k)|^2} {\frac{1}{N}\sum_k |x(k)|},$$

with a small safe_eps to avoid division by zero.

Numerical stability: To avoid instability when |y(k)| is very small, this implementation sets csgn(y(k)) = 0 when |y(k)| <= safe_eps.

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 13.3.

Sato( filter_order: int = 5, step_size: float = 0.01, w_init: Union[numpy.ndarray, list, NoneType] = None)
102    def __init__(
103        self,
104        filter_order: int = 5,
105        step_size: float = 0.01,
106        w_init: Optional[Union[np.ndarray, list]] = None,
107    ) -> None:
108        super().__init__(filter_order, w_init=w_init)
109        self.step_size = float(step_size)
110        self.n_coeffs = int(filter_order + 1)
supports_complex: bool = True
step_size: float
n_coeffs: int
def optimize( self, input_signal: Union[numpy.ndarray, list], desired_signal: Union[numpy.ndarray, list, NoneType] = None, verbose: bool = False, return_internal_states: bool = False, safe_eps: float = 1e-12) -> pydaptivefiltering.base.OptimizationResult:
112    def optimize(
113        self,
114        input_signal: Union[np.ndarray, list],
115        desired_signal: Optional[Union[np.ndarray, list]] = None,
116        verbose: bool = False,
117        return_internal_states: bool = False,
118        safe_eps: float = 1e-12,
119    ) -> OptimizationResult:
120        """
121        Executes the Sato adaptation loop over an input sequence.
122
123        Parameters
124        ----------
125        input_signal : array_like of complex
126            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
127        desired_signal : None, optional
128            Ignored. This is a blind algorithm: no desired reference is used.
129        verbose : bool, optional
130            If True, prints the total runtime after completion.
131        return_internal_states : bool, optional
132            If True, includes internal signals in ``result.extra``:
133            ``"dispersion_constant"`` (estimated ``zeta``) and
134            ``"sato_sign_track"`` (trajectory of ``csgn(y(k))`` with shape
135            ``(N,)``).
136        safe_eps : float, optional
137            Small epsilon used to avoid division by zero when estimating
138            ``zeta`` and to gate the computation of ``csgn(y(k))`` when ``|y(k)|``
139            is close to zero. Default is 1e-12.
140
141        Returns
142        -------
143        OptimizationResult
144            Result object with fields:
145            - outputs : ndarray of complex, shape ``(N,)``
146                Output sequence ``y[k]``.
147            - errors : ndarray of complex, shape ``(N,)``
148                Sato error sequence ``e[k] = y(k) - zeta*csgn(y(k))``.
149            - coefficients : ndarray of complex
150                Coefficient history recorded by the base class.
151            - error_type : str
152                Set to ``"blind_sato"``.
153            - extra : dict, optional
154                Present only if ``return_internal_states=True``.
155        """
156        tic: float = time()
157
158        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
159        n_samples: int = int(x.size)
160
161        num: float = float(np.mean(np.abs(x) ** 2))
162        den: float = float(np.mean(np.abs(x)))
163        dispersion_constant: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0
164
165        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
166        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
167
168        sign_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None
169
170        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
171        x_padded[self.filter_order:] = x
172
173        for k in range(n_samples):
174            x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]
175
176            y_k: complex = complex(np.dot(np.conj(self.w), x_k))
177            outputs[k] = y_k
178
179            mag: float = float(np.abs(y_k))
180            sato_sign: complex = (y_k / mag) if mag > safe_eps else (0.0 + 0.0j)
181
182            if return_internal_states and sign_track is not None:
183                sign_track[k] = sato_sign
184
185            e_k: complex = y_k - sato_sign * dispersion_constant
186            errors[k] = e_k
187
188            self.w = self.w - self.step_size * np.conj(e_k) * x_k
189            self._record_history()
190
191        runtime_s: float = float(time() - tic)
192        if verbose:
193            print(f"[Sato] Completed in {runtime_s * 1000:.02f} ms")
194
195        extra: Optional[Dict[str, Any]] = None
196        if return_internal_states:
197            extra = {
198                "sato_sign_track": sign_track,
199                "dispersion_constant": dispersion_constant,
200            }
201
202        return self._pack_results(
203            outputs=outputs,
204            errors=errors,
205            runtime_s=runtime_s,
206            error_type="blind_sato",
207            extra=extra,
208        )

Executes the Sato adaptation loop over an input sequence.

Parameters

input_signal : array_like of complex Input sequence x[k] with shape (N,) (will be flattened). desired_signal : None, optional Ignored. This is a blind algorithm: no desired reference is used. verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, includes internal signals in result.extra: "dispersion_constant" (estimated zeta) and "sato_sign_track" (trajectory of csgn(y(k)) with shape (N,)). safe_eps : float, optional Small epsilon used to avoid division by zero when estimating zeta and to gate the computation of csgn(y(k)) when |y(k)| is close to zero. Default is 1e-12.

Returns

OptimizationResult Result object with fields: - outputs : ndarray of complex, shape (N,) Output sequence y[k]. - errors : ndarray of complex, shape (N,) Sato error sequence e[k] = y(k) - zeta*csgn(y(k)). - coefficients : ndarray of complex Coefficient history recorded by the base class. - error_type : str Set to "blind_sato". - extra : dict, optional Present only if return_internal_states=True.

class Kalman(pydaptivefiltering.AdaptiveFilter):
class Kalman(AdaptiveFilter):
    """
    Kalman filter for state estimation (real or complex-valued).

    Implements the discrete-time Kalman filter recursion for linear state-space
    models with additive process and measurement noise. Matrices may be constant
    (single ``ndarray``) or time-varying (a sequence of arrays indexed by ``k``).

    The model used is:

    .. math::
        x(k) = A(k-1) x(k-1) + B(k) n(k),

    .. math::
        y(k) = C^T(k) x(k) + n_1(k),

    where :math:`n(k)` is the process noise with covariance :math:`R_n(k)` and
    :math:`n_1(k)` is the measurement noise with covariance :math:`R_{n1}(k)`.

    Notes
    -----
    API integration
    ~~~~~~~~~~~~~~~
    This class inherits from :class:`~pydaptivefiltering.base.AdaptiveFilter` to
    share a common interface. Here, the "weights" are the state estimate:
    ``self.w`` stores the current state vector (flattened), and
    ``self.w_history`` stores the covariance matrices over time.

    Time-varying matrices
    ~~~~~~~~~~~~~~~~~~~~~
    Any of ``A``, ``C_T``, ``B``, ``Rn``, ``Rn1`` may be provided either as:
    - a constant ``ndarray``, used for all k; or
    - a sequence (list/tuple) of ``ndarray``, where element ``k`` is used at time k.

    Dimensions
    ~~~~~~~~~~
    Let ``n`` be the state dimension, ``p`` the measurement dimension, and ``q``
    the process-noise dimension. Then:

    - ``A(k)`` has shape ``(n, n)``
    - ``C_T(k)`` has shape ``(p, n)``  (note: this is :math:`C^T`)
    - ``B(k)`` has shape ``(n, q)``
    - ``Rn(k)`` has shape ``(q, q)``
    - ``Rn1(k)`` has shape ``(p, p)``

    If ``B`` is not provided, the implementation uses ``B = I`` (thus ``q = n``),
    and expects ``Rn`` to be shape ``(n, n)``.

    Parameters
    ----------
    A : ndarray or Sequence[ndarray]
        State transition matrix :math:`A(k-1)` with shape ``(n, n)``.
    C_T : ndarray or Sequence[ndarray]
        Measurement matrix :math:`C^T(k)` with shape ``(p, n)``.
    Rn : ndarray or Sequence[ndarray]
        Process noise covariance :math:`R_n(k)` with shape ``(q, q)``.
    Rn1 : ndarray or Sequence[ndarray]
        Measurement noise covariance :math:`R_{n1}(k)` with shape ``(p, p)``.
    B : ndarray or Sequence[ndarray], optional
        Process noise input matrix :math:`B(k)` with shape ``(n, q)``.
        If None, uses identity.
    x_init : ndarray, optional
        Initial state estimate :math:`x(0|0)`. Accepts shapes compatible with
        ``(n,)``, ``(n,1)``, or ``(1,n)``. If None, initializes with zeros.
    Re_init : ndarray, optional
        Initial estimation error covariance :math:`R_e(0|0)` with shape ``(n, n)``.
        If None, initializes with identity.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, Algorithm 17.1.
    """
    supports_complex: bool = True

    A: Union[np.ndarray, Sequence[np.ndarray]]
    C_T: Union[np.ndarray, Sequence[np.ndarray]]
    Rn: Union[np.ndarray, Sequence[np.ndarray]]
    Rn1: Union[np.ndarray, Sequence[np.ndarray]]
    B: Optional[Union[np.ndarray, Sequence[np.ndarray]]]

    x: np.ndarray
    Re: np.ndarray

    def __init__(
        self,
        A: Union[np.ndarray, Sequence[np.ndarray]],
        C_T: Union[np.ndarray, Sequence[np.ndarray]],
        Rn: Union[np.ndarray, Sequence[np.ndarray]],
        Rn1: Union[np.ndarray, Sequence[np.ndarray]],
        B: Optional[Union[np.ndarray, Sequence[np.ndarray]]] = None,
        x_init: Optional[np.ndarray] = None,
        Re_init: Optional[np.ndarray] = None,
    ) -> None:
        # The state dimension n is fixed by A(0); all other shapes are
        # validated per-iteration (they may be time-varying).
        A0 = _mat_at_k(A, 0)
        if A0.ndim != 2 or A0.shape[0] != A0.shape[1]:
            raise ValueError(f"A must be square (n,n). Got {A0.shape}.")
        n = int(A0.shape[0])

        # Base class models n "weights" as a filter of order n-1; here the
        # weights slot is reused to expose the n-dimensional state estimate.
        super().__init__(filter_order=n - 1, w_init=None)

        self.A = A
        self.C_T = C_T
        self.Rn = Rn
        self.Rn1 = Rn1
        self.B = B

        # Working dtype derived from the model matrices at k=0.
        dtype = np.result_type(
            A0, _mat_at_k(C_T, 0), _mat_at_k(Rn, 0), _mat_at_k(Rn1, 0)
        )
        # Fix: promote to complex128 only when the inputs are genuinely
        # complex. The previous check ("floating -> float64, otherwise
        # complex128") silently turned integer-valued models into complex.
        if np.issubdtype(dtype, np.complexfloating):
            dtype = np.complex128
        else:
            dtype = np.float64

        self._dtype = dtype
        self.regressor = np.zeros(self.filter_order + 1, dtype=self._dtype)

        if x_init is None:
            x0 = np.zeros((n, 1), dtype=dtype)
        else:
            x0 = _as_2d_col(np.asarray(x_init, dtype=dtype))
            if x0.shape[0] != n:
                raise ValueError(f"x_init must have length n={n}. Got {x0.shape}.")
        self.x = x0

        if Re_init is None:
            Re0 = np.eye(n, dtype=dtype)
        else:
            Re0 = np.asarray(Re_init, dtype=dtype)
            if Re0.shape != (n, n):
                raise ValueError(f"Re_init must be shape (n,n)={(n,n)}. Got {Re0.shape}.")
        self.Re = Re0

        # Expose the state estimate through the common "weights" attribute.
        # (The zero-filled self.w from the base class is superseded here.)
        self.w = self.x[:, 0].copy()

        # Covariance history; stored under w_history for API compatibility.
        self.w_history = []

    def _validate_step_shapes(
        self,
        A: np.ndarray,
        C_T: np.ndarray,
        Rn: np.ndarray,
        Rn1: np.ndarray,
        B: np.ndarray,
    ) -> None:
        """Validate per-iteration matrix shapes.

        Raises
        ------
        ValueError
            If any matrix has an unexpected shape for the current state dimension.
        """
        n = int(self.x.shape[0])
        if A.shape != (n, n):
            raise ValueError(f"A(k) must be {(n,n)}. Got {A.shape}.")
        if C_T.ndim != 2 or C_T.shape[1] != n:
            raise ValueError(f"C_T(k) must be (p,n) with n={n}. Got {C_T.shape}.")
        p = int(C_T.shape[0])
        if Rn1.shape != (p, p):
            raise ValueError(f"Rn1(k) must be {(p,p)}. Got {Rn1.shape}.")
        if B.ndim != 2 or B.shape[0] != n:
            raise ValueError(f"B(k) must be (n,q) with n={n}. Got {B.shape}.")
        q = int(B.shape[1])
        if Rn.shape != (q, q):
            raise ValueError(f"Rn(k) must be {(q,q)}. Got {Rn.shape}.")

    def optimize(
        self,
        input_signal: ArrayLike,
        desired_signal: Optional[ArrayLike] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Executes the Kalman recursion for a sequence of measurements ``y[k]``.

        Parameters
        ----------
        input_signal : array_like
            Measurement sequence ``y[k]``. Accepted shapes:
            - ``(N,)``       for scalar measurements
            - ``(N, p)``     for p-dimensional measurements
            - ``(N, p, 1)``  also accepted (squeezed to ``(N, p)``)
        desired_signal : array_like, optional
            Ignored (kept only for API standardization).
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, returns selected internal values in ``result.extra``.
        safe_eps : float, optional
            Small positive value used to regularize the innovation covariance
            matrix if a linear solve fails (numerical stabilization).

        Returns
        -------
        OptimizationResult
            outputs : ndarray
                State estimates ``x(k|k)``, shape ``(N, n)``.
            errors : ndarray
                Innovations ``v(k) = y(k) - C^T(k) x(k|k-1)``, shape ``(N, p)``.
            coefficients : ndarray
                Covariance history ``R_e(k|k)``, shape ``(N, n, n)``.
            error_type : str
                ``"innovation"``.
            extra : dict, optional
                Present only if ``return_internal_states=True``. See below.

        Extra (when return_internal_states=True)
        ----------------------------------------
        kalman_gain_last : ndarray
            Kalman gain ``K`` at the last iteration, shape ``(n, p)``.
        predicted_state_last : ndarray
            Predicted state ``x(k|k-1)`` at the last iteration, shape ``(n,)``.
        predicted_cov_last : ndarray
            Predicted covariance ``R_e(k|k-1)`` at the last iteration, shape ``(n, n)``.
        innovation_cov_last : ndarray
            Innovation covariance ``S`` at the last iteration, shape ``(p, p)``.
        safe_eps : float
            The stabilization epsilon used when regularizing ``S``.
        """
        t0 = perf_counter()

        y_mat = _as_meas_matrix(np.asarray(input_signal))
        y_mat = y_mat.astype(self._dtype, copy=False)

        N = int(y_mat.shape[0])
        n = int(self.x.shape[0])
        p_dim = int(y_mat.shape[1])

        outputs = np.zeros((N, n), dtype=self._dtype)
        errors = np.zeros((N, p_dim), dtype=self._dtype)

        I_n = np.eye(n, dtype=self._dtype)

        # Restart the covariance history for this run.
        self.w_history = []

        last_K: Optional[np.ndarray] = None
        last_x_pred: Optional[np.ndarray] = None
        last_Re_pred: Optional[np.ndarray] = None
        last_S: Optional[np.ndarray] = None

        for k in range(N):
            # Resolve (possibly time-varying) model matrices at time k.
            A_k = np.asarray(_mat_at_k(self.A, k), dtype=self._dtype)
            C_T_k = np.asarray(_mat_at_k(self.C_T, k), dtype=self._dtype)
            Rn_k = np.asarray(_mat_at_k(self.Rn, k), dtype=self._dtype)
            Rn1_k = np.asarray(_mat_at_k(self.Rn1, k), dtype=self._dtype)
            B_k = np.asarray(
                _mat_at_k(self.B, k) if self.B is not None else I_n,
                dtype=self._dtype,
            )

            self._validate_step_shapes(A_k, C_T_k, Rn_k, Rn1_k, B_k)

            y_k = _as_2d_col(y_mat[k]).astype(self._dtype, copy=False)
            C_k = C_T_k.conj().T

            # Prediction step: x(k|k-1) and R_e(k|k-1).
            x_pred = A_k @ self.x
            Re_pred = (A_k @ self.Re @ A_k.conj().T) + (B_k @ Rn_k @ B_k.conj().T)

            # Innovation v(k) = y(k) - C^T(k) x(k|k-1).
            e_k = y_k - (C_T_k @ x_pred)

            # Innovation covariance S = C^T R_e(k|k-1) C + R_{n1}.
            S = (C_T_k @ Re_pred @ C_k) + Rn1_k

            # Kalman gain K = R_e(k|k-1) C S^{-1}, computed as a solve on the
            # Hermitian transposes to avoid forming S^{-1} explicitly.
            RC = Re_pred @ C_k
            try:
                K = np.linalg.solve(S.conj().T, RC.conj().T).conj().T
            except np.linalg.LinAlgError:
                # Regularize a singular S and retry.
                S_reg = S + (safe_eps * np.eye(p_dim, dtype=self._dtype))
                K = np.linalg.solve(S_reg.conj().T, RC.conj().T).conj().T

            # Update step: x(k|k) and R_e(k|k).
            self.x = x_pred + (K @ e_k)
            self.Re = (I_n - (K @ C_T_k)) @ Re_pred

            outputs[k, :] = self.x[:, 0]
            errors[k, :] = e_k[:, 0]

            self.w_history.append(self.Re.copy())

            self.w = self.x[:, 0].copy()

            last_K = K
            last_x_pred = x_pred
            last_Re_pred = Re_pred
            last_S = S

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[Kalman] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "kalman_gain_last": last_K,
                "predicted_state_last": None if last_x_pred is None else last_x_pred[:, 0].copy(),
                "predicted_cov_last": last_Re_pred,
                "innovation_cov_last": last_S,
                "safe_eps": float(safe_eps),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="innovation",
            extra=extra,
        )

Kalman filter for state estimation (real or complex-valued).

Implements the discrete-time Kalman filter recursion for linear state-space models with additive process and measurement noise. Matrices may be constant (single ndarray) or time-varying (a sequence of arrays indexed by k).

The model used is:

$$x(k) = A(k-1) x(k-1) + B(k) n(k),$$

$$y(k) = C^T(k) x(k) + n_1(k),$$

where \( n(k) \) is the process noise with covariance \( R_n(k) \) and \( n_1(k) \) is the measurement noise with covariance \( R_{n1}(k) \).

Notes

API integration: This class inherits from pydaptivefiltering.base.AdaptiveFilter to share a common interface. Here, the "weights" are the state estimate: self.w stores the current state vector (flattened), and self.w_history stores the covariance matrices over time.

Time-varying matrices: Any of A, C_T, B, Rn, Rn1 may be provided either as:

  • a constant ndarray, used for all k; or
  • a sequence (list/tuple) of ndarray, where element k is used at time k.

Dimensions: Let n be the state dimension, p the measurement dimension, and q the process-noise dimension. Then:

  • A(k) has shape (n, n)
  • C_T(k) has shape (p, n) (note: this is \( C^T \))
  • B(k) has shape (n, q)
  • Rn(k) has shape (q, q)
  • Rn1(k) has shape (p, p)

If B is not provided, the implementation uses B = I (thus q = n), and expects Rn to be shape (n, n).

Parameters

A : ndarray or Sequence[ndarray]
    State transition matrix \( A(k-1) \) with shape (n, n).
C_T : ndarray or Sequence[ndarray]
    Measurement matrix \( C^T(k) \) with shape (p, n).
Rn : ndarray or Sequence[ndarray]
    Process noise covariance \( R_n(k) \) with shape (q, q).
Rn1 : ndarray or Sequence[ndarray]
    Measurement noise covariance \( R_{n1}(k) \) with shape (p, p).
B : ndarray or Sequence[ndarray], optional
    Process noise input matrix \( B(k) \) with shape (n, q). If None, uses identity.
x_init : ndarray, optional
    Initial state estimate \( x(0|0) \). Accepts shapes compatible with (n,), (n,1), or (1,n).
    If None, initializes with zeros.
Re_init : ndarray, optional
    Initial estimation error covariance \( R_e(0|0) \) with shape (n, n).
    If None, initializes with identity.

References

P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, Algorithm 17.1.
Kalman( A: Union[numpy.ndarray, Sequence[numpy.ndarray]], C_T: Union[numpy.ndarray, Sequence[numpy.ndarray]], Rn: Union[numpy.ndarray, Sequence[numpy.ndarray]], Rn1: Union[numpy.ndarray, Sequence[numpy.ndarray]], B: Union[numpy.ndarray, Sequence[numpy.ndarray], NoneType] = None, x_init: Optional[numpy.ndarray] = None, Re_init: Optional[numpy.ndarray] = None)
198    def __init__(
199        self,
200        A: Union[np.ndarray, Sequence[np.ndarray]],
201        C_T: Union[np.ndarray, Sequence[np.ndarray]],
202        Rn: Union[np.ndarray, Sequence[np.ndarray]],
203        Rn1: Union[np.ndarray, Sequence[np.ndarray]],
204        B: Optional[Union[np.ndarray, Sequence[np.ndarray]]] = None,
205        x_init: Optional[np.ndarray] = None,
206        Re_init: Optional[np.ndarray] = None,
207    ) -> None:
208        A0 = _mat_at_k(A, 0)
209        if A0.ndim != 2 or A0.shape[0] != A0.shape[1]:
210            raise ValueError(f"A must be square (n,n). Got {A0.shape}.")
211        n = int(A0.shape[0])
212
213        super().__init__(filter_order=n - 1, w_init=None)
214
215        self.A = A
216        self.C_T = C_T
217        self.Rn = Rn
218        self.Rn1 = Rn1
219        self.B = B
220
221        dtype = np.result_type(
222            A0, _mat_at_k(C_T, 0), _mat_at_k(Rn, 0), _mat_at_k(Rn1, 0)
223        )
224        dtype = np.float64 if np.issubdtype(dtype, np.floating) else np.complex128
225
226        self._dtype = dtype
227        self.regressor = np.zeros(self.filter_order + 1, dtype=self._dtype)
228        self.w = np.zeros(self.filter_order + 1, dtype=self._dtype)
229
230        if x_init is None:
231            x0 = np.zeros((n, 1), dtype=dtype)
232        else:
233            x0 = _as_2d_col(np.asarray(x_init, dtype=dtype))
234            if x0.shape[0] != n:
235                raise ValueError(f"x_init must have length n={n}. Got {x0.shape}.")
236        self.x = x0
237
238        if Re_init is None:
239            Re0 = np.eye(n, dtype=dtype)
240        else:
241            Re0 = np.asarray(Re_init, dtype=dtype)
242            if Re0.shape != (n, n):
243                raise ValueError(f"Re_init must be shape (n,n)={(n,n)}. Got {Re0.shape}.")
244        self.Re = Re0
245
246        self.w = self.x[:, 0].copy()
247
248        self.w_history = []
supports_complex: bool = True
A: Union[numpy.ndarray, Sequence[numpy.ndarray]]
C_T: Union[numpy.ndarray, Sequence[numpy.ndarray]]
Rn: Union[numpy.ndarray, Sequence[numpy.ndarray]]
Rn1: Union[numpy.ndarray, Sequence[numpy.ndarray]]
B: Union[numpy.ndarray, Sequence[numpy.ndarray], NoneType]
x: numpy.ndarray
Re: numpy.ndarray
regressor
w
w_history
def optimize( self, input_signal: Union[numpy.ndarray, list], desired_signal: Union[numpy.ndarray, list, NoneType] = None, verbose: bool = False, return_internal_states: bool = False, safe_eps: float = 1e-12) -> pydaptivefiltering.base.OptimizationResult:
279    def optimize(
280        self,
281        input_signal: ArrayLike,
282        desired_signal: Optional[ArrayLike] = None,
283        verbose: bool = False,
284        return_internal_states: bool = False,
285        safe_eps: float = 1e-12,
286    ) -> OptimizationResult:
287        """
288        Executes the Kalman recursion for a sequence of measurements ``y[k]``.
289
290        Parameters
291        ----------
292        input_signal : array_like
293            Measurement sequence ``y[k]``. Accepted shapes:
294            - ``(N,)``       for scalar measurements
295            - ``(N, p)``     for p-dimensional measurements
296            - ``(N, p, 1)``  also accepted (squeezed to ``(N, p)``)
297        desired_signal : array_like, optional
298            Ignored (kept only for API standardization).
299        verbose : bool, optional
300            If True, prints the total runtime after completion.
301        return_internal_states : bool, optional
302            If True, returns selected internal values in ``result.extra``.
303        safe_eps : float, optional
304            Small positive value used to regularize the innovation covariance
305            matrix if a linear solve fails (numerical stabilization).
306
307        Returns
308        -------
309        OptimizationResult
310            outputs : ndarray
311                State estimates ``x(k|k)``, shape ``(N, n)``.
312            errors : ndarray
313                Innovations ``v(k) = y(k) - C^T(k) x(k|k-1)``, shape ``(N, p)``.
314            coefficients : ndarray
315                Covariance history ``R_e(k|k)``, shape ``(N, n, n)``.
316            error_type : str
317                ``"innovation"``.
318            extra : dict, optional
319                Present only if ``return_internal_states=True``. See below.
320
321        Extra (when return_internal_states=True)
322        --------------------------------------
323        kalman_gain_last : ndarray
324            Kalman gain ``K`` at the last iteration, shape ``(n, p)``.
325        predicted_state_last : ndarray
326            Predicted state ``x(k|k-1)`` at the last iteration, shape ``(n,)``.
327        predicted_cov_last : ndarray
328            Predicted covariance ``R_e(k|k-1)`` at the last iteration, shape ``(n, n)``.
329        innovation_cov_last : ndarray
330            Innovation covariance ``S`` at the last iteration, shape ``(p, p)``.
331        safe_eps : float
332            The stabilization epsilon used when regularizing ``S``.
333        """
334        t0 = perf_counter()
335
336        y_mat = _as_meas_matrix(np.asarray(input_signal))
337        y_mat = y_mat.astype(self._dtype, copy=False)
338
339        N = int(y_mat.shape[0])
340        n = int(self.x.shape[0])
341        p_dim = int(y_mat.shape[1])
342
343        outputs = np.zeros((N, n), dtype=self._dtype)
344        errors = np.zeros((N, p_dim), dtype=self._dtype)
345
346        I_n = np.eye(n, dtype=self._dtype)
347
348        self.w_history = []
349
350        last_K: Optional[np.ndarray] = None
351        last_x_pred: Optional[np.ndarray] = None
352        last_Re_pred: Optional[np.ndarray] = None
353        last_S: Optional[np.ndarray] = None
354
355        for k in range(N):
356            A_k = np.asarray(_mat_at_k(self.A, k), dtype=self._dtype)
357            C_T_k = np.asarray(_mat_at_k(self.C_T, k), dtype=self._dtype)
358            Rn_k = np.asarray(_mat_at_k(self.Rn, k), dtype=self._dtype)
359            Rn1_k = np.asarray(_mat_at_k(self.Rn1, k), dtype=self._dtype)
360            B_k = np.asarray(
361                _mat_at_k(self.B, k) if self.B is not None else I_n,
362                dtype=self._dtype,
363            )
364
365            self._validate_step_shapes(A_k, C_T_k, Rn_k, Rn1_k, B_k)
366
367            y_k = _as_2d_col(y_mat[k]).astype(self._dtype, copy=False)
368            C_k = C_T_k.conj().T
369
370            x_pred = A_k @ self.x
371            Re_pred = (A_k @ self.Re @ A_k.conj().T) + (B_k @ Rn_k @ B_k.conj().T)
372
373            e_k = y_k - (C_T_k @ x_pred)
374
375            S = (C_T_k @ Re_pred @ C_k) + Rn1_k
376
377            RC = Re_pred @ C_k
378            try:
379                K = np.linalg.solve(S.conj().T, RC.conj().T).conj().T
380            except np.linalg.LinAlgError:
381                S_reg = S + (safe_eps * np.eye(p_dim, dtype=self._dtype))
382                K = np.linalg.solve(S_reg.conj().T, RC.conj().T).conj().T
383
384            self.x = x_pred + (K @ e_k)
385            self.Re = (I_n - (K @ C_T_k)) @ Re_pred
386
387            outputs[k, :] = self.x[:, 0]
388            errors[k, :] = e_k[:, 0]
389
390            self.w_history.append(self.Re.copy())
391
392            self.w = self.x[:, 0].copy()
393
394            last_K = K
395            last_x_pred = x_pred
396            last_Re_pred = Re_pred
397            last_S = S
398
399        runtime_s = float(perf_counter() - t0)
400        if verbose:
401            print(f"[Kalman] Completed in {runtime_s * 1000:.03f} ms")
402
403        extra: Optional[Dict[str, Any]] = None
404        if return_internal_states:
405            extra = {
406                "kalman_gain_last": last_K,
407                "predicted_state_last": None if last_x_pred is None else last_x_pred[:, 0].copy(),
408                "predicted_cov_last": last_Re_pred,
409                "innovation_cov_last": last_S,
410                "safe_eps": float(safe_eps),
411            }
412
413        return self._pack_results(
414            outputs=outputs,
415            errors=errors,
416            runtime_s=runtime_s,
417            error_type="innovation",
418            extra=extra,
419        )

Executes the Kalman recursion for a sequence of measurements y[k].

Parameters

input_signal : array_like Measurement sequence y[k]. Accepted shapes: - (N,) for scalar measurements - (N, p) for p-dimensional measurements - (N, p, 1) also accepted (squeezed to (N, p)) desired_signal : array_like, optional Ignored (kept only for API standardization). verbose : bool, optional If True, prints the total runtime after completion. return_internal_states : bool, optional If True, returns selected internal values in result.extra. safe_eps : float, optional Small positive value used to regularize the innovation covariance matrix if a linear solve fails (numerical stabilization).

Returns

OptimizationResult outputs : ndarray State estimates x(k|k), shape (N, n). errors : ndarray Innovations v(k) = y(k) - C^T(k) x(k|k-1), shape (N, p). coefficients : ndarray Covariance history R_e(k|k), shape (N, n, n). error_type : str "innovation". extra : dict, optional Present only if return_internal_states=True. See below.

Extra (when return_internal_states=True)

kalman_gain_last : ndarray Kalman gain K at the last iteration, shape (n, p). predicted_state_last : ndarray Predicted state x(k|k-1) at the last iteration, shape (n,). predicted_cov_last : ndarray Predicted covariance R_e(k|k-1) at the last iteration, shape (n, n). innovation_cov_last : ndarray Innovation covariance S at the last iteration, shape (p, p). safe_eps : float The stabilization epsilon used when regularizing S.

def info():
def info():
    """Print an overview of the library's algorithm coverage.

    Lists, chapter by chapter of Diniz's *Adaptive Filtering*, the algorithm
    families implemented by the package, followed by a short usage hint.
    """
    print("\n" + "="*70)
    print("      PyDaptive Filtering - Complete Library Overview")
    print("      Reference: 'Adaptive Filtering' by Paulo S. R. Diniz")
    print("="*70)
    sections = {
        "Cap 3/4 (LMS)": "LMS, NLMS, Affine Projection, Sign Algorithms, Transform Domain",
        "Cap 5 (RLS)": "Standard RLS, Alternative RLS",
        "Cap 6 (Set-Membership)": "SM-NLMS, BNLMS, SM-AP, Simplified AP/PUAP",
        "Cap 7 (Lattice RLS)": "LRLS (Posteriori, Priori, Error Feedback), NLRLS",
        "Cap 8 (Fast RLS)": "Fast Transversal RLS, Stabilized FTRLS",
        "Cap 9 (QR)": "QR-Decomposition Based RLS",
        # Typo fixed: "Steinglitz" -> "Steiglitz" (matches the exported
        # SteiglitzMcBride class name).
        "Cap 10 (IIR)": "Error Equation, Gauss-Newton, Steiglitz-McBride, RLS-IIR",
        "Cap 11 (Nonlinear)": "Volterra (LMS/RLS), MLP, RBF, Bilinear RLS",
        "Cap 12 (Subband)": "CFDLMS, DLCLLMS, OLSBLMS",
        "Cap 13 (Blind)": "CMA, Godard, Sato, Blind Affine Projection",
        "Cap 17 (Kalman)": "Kalman Filter",
    }
    for cap, algs in sections.items():
        print(f"\n{cap:25}: {algs}")

    print("\n" + "-"*70)
    print("Usage example: from pydaptivefiltering import LMS")
    print("Documentation: help(pydaptivefiltering.LMS)")
    print("="*70 + "\n")

Prints information about the library's algorithm coverage.